/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 *
 */
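
/*
 * Userspace usage sketch (illustrative only -- the device path and CDB
 * below are assumptions; see bsg_write(), bsg_read() and the SG_IO ioctl
 * handler for the authoritative semantics):
 *
 *	struct sg_io_v4 hdr = {
 *		.guard		= 'Q',
 *		.protocol	= BSG_PROTOCOL_SCSI,
 *		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_CMD,
 *		.request	= (__u64) (unsigned long) cdb,
 *		.request_len	= sizeof(cdb),
 *	};
 *
 *	fd = open("/dev/bsg/...", O_RDWR);
 *	write(fd, &hdr, sizeof(hdr));	- queues the command asynchronously
 *	read(fd, &hdr, sizeof(hdr));	- reaps one completed command
 *
 * ioctl(fd, SG_IO, &hdr) performs the same round trip synchronously.
 */
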
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

static char bsg_version[] = "block layer sg (bsg) 0.4";

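/*
 * Per-device state, looked up by minor and refcounted across opens.
 * Commands sit on busy_list while in flight and move to done_list on
 * completion; wq_free is woken as queue slots free up, wq_done as
 * results arrive.
 */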
struct bsg_device {
	request_queue_t *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK = 1,
	BSG_F_WRITE_PERM = 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)

/*
 * just for testing
 */
#define BSG_MAJOR	(240)

static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;

#define BSG_LIST_SIZE	(8)
#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	struct sg_io_v4 __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

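/*
 * Allocate a command slot, failing with -EINVAL once max_queue commands
 * are already outstanding. The lock is dropped around the allocation
 * itself, and the count is rolled back if the allocation fails.
 */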
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	memset(bc, 0, sizeof(*bc));
	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds--;
	list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds++;
	list_add_tail(&bc->list, &bd->done_list);
	wake_up(&bd->wq_done);
}

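/*
 * Sleep until at least one queued command completes. Returns -ENODATA
 * when nothing is outstanding, -EAGAIN for non-blocking opens, and
 * -ERESTARTSYS if an interruptible wait was broken by a signal.
 */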
static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, state);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	request_queue_t *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw = 0; /* shut up gcc */
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to the
	 * request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_del(&bc->list);
	bsg_add_done_cmd(bd, bc);
	spin_unlock_irqrestore(&bd->lock, flags);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry_bc(bd->done_list.next);
		bsg_del_done_cmd(bd, bc);
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

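/*
 * Copy status, residual and sense data back into the v4 header, then
 * unmap the user buffers and release the request(s).
 */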
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->din_resid = rq->data_len;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min((unsigned int) hdr->max_response_len,
			      rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}

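/*
 * Called on release: block until everything still in flight has
 * finished, then reap and discard the completions.
 */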
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

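/*
 * Reap completed commands, copying one struct sg_io_v4 back to userspace
 * per command. 'count' must be a whole multiple of the header size.
 */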
static ssize_t
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request.  so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

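/*
 * Anything other than the flow-control conditions -ENOSPC, -ENODATA and
 * -EAGAIN counts as a real error.
 */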
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

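/*
 * Parse one or more struct sg_io_v4 headers from userspace and queue the
 * resulting requests asynchronously; completions are reaped later via
 * read().
 */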
static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
			   size_t count, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_v4 __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_read = 0;
	ret = __bsg_write(bd, buf, count, &bytes_read);
	*ppos = bytes_read;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
	return bytes_read;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * error detection here is best-effort only; it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd = NULL;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->queue = rq;
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, list) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->queue, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

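/*
 * POLLIN when at least one completed command is waiting to be reaped,
 * POLLOUT while there is still room to queue another command.
 */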
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	  unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	if (!bd)
		return -ENXIO;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
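	/*
	 * SG_IO is the synchronous variant: map, execute and reap the
	 * command in one call.
	 */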
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return 0;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.ioctl		=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	bsg_device_nr--;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, const char *name)
{
	struct bsg_class_device *bcd, *__bcd;
	dev_t dev;
	int ret = -EMFILE;
	struct class_device *class_dev = NULL;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	if (bsg_device_nr == BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		goto err;
	}

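	/*
	 * scan for a free minor, continuing circularly from where the
	 * last search left off
	 */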
retry:
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == bsg_minor_idx) {
			bsg_minor_idx++;
			if (bsg_minor_idx == BSG_MAX_DEVS)
				bsg_minor_idx = 0;
			goto retry;
		}
	}

	bcd->minor = bsg_minor_idx++;
	if (bsg_minor_idx == BSG_MAX_DEVS)
		bsg_minor_idx = 0;

	bcd->queue = q;
	dev = MKDEV(BSG_MAJOR, bcd->minor);
	class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto err;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.dentry) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto err;
	}

	list_add_tail(&bcd->list, &bsg_class_list);
	bsg_device_nr++;

	mutex_unlock(&bsg_mutex);
	return 0;
err:
	if (class_dev)
		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	int ret;
	struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
	struct request_queue *rq = sdp->request_queue;

	if (rq->kobj.parent)
		ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
	else
		ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
	return ret;
}

static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
}

static struct class_interface bsg_intf = {
	.add	= bsg_add,
	.remove	= bsg_remove,
};

static struct cdev bsg_cdev = {
	.kobj	= {.name = "bsg", },
	.owner	= THIS_MODULE,
};

static int __init bsg_init(void)
{
	int ret, i;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
					   sizeof(struct bsg_command), 0, 0, NULL, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		kmem_cache_destroy(bsg_cmd_cachep);
		return PTR_ERR(bsg_class);
	}

	ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
	if (ret) {
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		return ret;
	}

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
	if (ret) {
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
		return ret;
	}

	ret = scsi_register_interface(&bsg_intf);
	if (ret) {
		printk(KERN_ERR "bsg: failed to register scsi interface %d\n", ret);
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		cdev_del(&bsg_cdev);
		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
		return ret;
	}

	printk(KERN_INFO "%s loaded\n", bsg_version);
	return 0;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

device_initcall(bsg_init);