/*
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2014 Douglas Gilbert
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 */

static int sg_version_num = 30536;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"
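
/* Illustrative note (not from the original source): with two digits per
 * component, 30536 decodes as 3.05.36, which matches SG_VERSION_STR "3.5.36". */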

/*
 *  D. P. Gilbert (dgilbert@interlog.com), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 */
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include <linux/cred.h>		/* for sg_check_file_access() */

#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768

/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
 * than 16 bytes are "variable length" whose length is a multiple of 4
 */
#define SG_MAX_CDB_SIZE 252

#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
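
/* Illustrative note (not from the original source): <scsi/sg.h> normally
 * defines SG_DEFAULT_TIMEOUT_USER as 60*USER_HZ, so the macro above works
 * out to 60*HZ jiffies, i.e. a 60 second default timeout. */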

int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
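
/* Illustrative usage (hypothetical value, not from the original source):
 * the default can be inspected and changed at run time, e.g.
 *
 *	cat /proc/scsi/sg/def_reserved_size
 *	echo 131072 > /proc/scsi/sg/def_reserved_size
 *
 * after which subsequent sg_open() calls try to reserve that many bytes. */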
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512

static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
					   file descriptor list for device */

static struct class_interface sg_interface = {
	.add_dev        = sg_add_device,
	.remove_dev     = sg_remove_device,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	struct page **pages;
	int page_order;
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct list_head entry;	/* list entry */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	/* done protected by rq_list_lock */
	char done;		/* 0->before bh, 1->before read, 2->read */
	struct request *rq;
	struct bio *bio;
	struct execute_work ew;
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct list_head sfd_siblings;	/* protected by device's sfd_lock */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	struct mutex f_mutex;	/* protect against changes in this fd */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	struct list_head rq_list;	/* head of request list */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	unsigned char next_cmd_len;	/* 0: automatic, >0: use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
	char res_in_use;	/* 1 -> 'reserve' array in use */
	struct kref f_ref;
	struct execute_work ew;
} Sg_fd;

typedef struct sg_device { /* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t open_wait;	/* queue open() when O_EXCL present */
	struct mutex open_rel_lock;	/* held when in open() or release() */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	u32 index;		/* device index number */
	struct list_head sfds;
	rwlock_t sfd_lock;	/* protect access to sfd list */
	atomic_t detaching;	/* 0->device usable, 1->device detaching */
	bool exclude;		/* 1->open(O_EXCL) succeeded and is active */
	int open_cnt;		/* count of opens (perhaps < num(sfds) ) */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev *cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
	struct kref d_ref;
} Sg_device;

/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
			const char __user *buf, size_t count, int blocking,
			int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

#define sg_printk(prefix, sdp, fmt, a...) \
	sdev_prefix_printk(prefix, (sdp)->device,		\
			   (sdp)->disk->disk_name, fmt, ##a)

/*
 * The SCSI interfaces that use read() and write() as an asynchronous variant of
 * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
 * to trigger read() and write() calls from various contexts with elevated
 * privileges. This can lead to kernel memory corruption (e.g. if these
 * interfaces are called through splice()) and privilege escalation inside
 * userspace (e.g. if a process with access to such a device passes a file
 * descriptor to a SUID binary as stdin/stdout/stderr).
 *
 * This function provides protection for the legacy API by restricting the
 * calling context.
 */
static int sg_check_file_access(struct file *filp, const char *caller)
{
	if (filp->f_cred != current_real_cred()) {
		pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			caller, task_tgid_vnr(current), current->comm);
		return -EPERM;
	}
	if (uaccess_kernel()) {
		pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
			caller, task_tgid_vnr(current), current->comm);
		return -EACCES;
	}
	return 0;
}
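
/*
 * Illustrative sketch (user-space, not part of this driver): the legacy
 * asynchronous interface guarded above queues a command with write(2) and
 * reaps the result later with read(2) on the same descriptor, e.g.
 * (hypothetical descriptor sg_fd):
 *
 *	sg_io_hdr_t hdr = { .interface_id = 'S', ... };
 *	write(sg_fd, &hdr, sizeof(hdr));	// queue the command
 *	...
 *	read(sg_fd, &hdr, sizeof(hdr));		// collect the response
 *
 * Any context that can reach write() on the descriptor can issue SCSI
 * commands, which is why the credentials and uaccess state of the caller
 * are checked before the legacy paths run.
 */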

static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
	struct sg_fd *sfp = filp->private_data;

	if (sfp->parentdp->device->type == TYPE_SCANNER)
		return 0;

	return blk_verify_command(cmd, filp->f_mode);
}

static int
open_wait(Sg_device *sdp, int flags)
{
	int retval = 0;

	if (flags & O_EXCL) {
		while (sdp->open_cnt > 0) {
			mutex_unlock(&sdp->open_rel_lock);
			retval = wait_event_interruptible(sdp->open_wait,
					(atomic_read(&sdp->detaching) ||
					 !sdp->open_cnt));
			mutex_lock(&sdp->open_rel_lock);

			if (retval)	/* -ERESTARTSYS */
				return retval;
			if (atomic_read(&sdp->detaching))
				return -ENODEV;
		}
	} else {
		while (sdp->exclude) {
			mutex_unlock(&sdp->open_rel_lock);
			retval = wait_event_interruptible(sdp->open_wait,
					(atomic_read(&sdp->detaching) ||
					 !sdp->exclude));
			mutex_lock(&sdp->open_rel_lock);

			if (retval)	/* -ERESTARTSYS */
				return retval;
			if (atomic_read(&sdp->detaching))
				return -ENODEV;
		}
	}

	return retval;
}

/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	struct request_queue *q;
	Sg_device *sdp;
	Sg_fd *sfp;
	int retval;

	nonseekable_open(inode, filp);
	if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
		return -EPERM;	/* Can't lock it with read only access */
	sdp = sg_get_dev(dev);
	if (IS_ERR(sdp))
		return PTR_ERR(sdp);

	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_open: flags=0x%x\n", flags));

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (retval)
		goto sg_put;

	retval = scsi_autopm_get_device(sdp->device);
	if (retval)
		goto sdp_put;

	/* scsi_block_when_processing_errors() may block so bypass
	 * check if O_NONBLOCK. Permits SCSI commands to be issued
	 * during error recovery. Tread carefully. */
	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto error_out;
	}

	mutex_lock(&sdp->open_rel_lock);
	if (flags & O_NONBLOCK) {
		if (flags & O_EXCL) {
			if (sdp->open_cnt > 0) {
				retval = -EBUSY;
				goto error_mutex_locked;
			}
		} else {
			if (sdp->exclude) {
				retval = -EBUSY;
				goto error_mutex_locked;
			}
		}
	} else {
		retval = open_wait(sdp, flags);
		if (retval)	/* -ERESTARTSYS or -ENODEV */
			goto error_mutex_locked;
	}

	/* N.B. at this point we are holding the open_rel_lock */
	if (flags & O_EXCL)
		sdp->exclude = true;

	if (sdp->open_cnt < 1) {	/* no existing opens */
		q = sdp->device->request_queue;
		sdp->sg_tablesize = queue_max_segments(q);
	}
	sfp = sg_add_sfp(sdp);
	if (IS_ERR(sfp)) {
		retval = PTR_ERR(sfp);
		goto out_undo;
	}

	filp->private_data = sfp;
	sdp->open_cnt++;
	mutex_unlock(&sdp->open_rel_lock);

	retval = 0;
sg_put:
	kref_put(&sdp->d_ref, sg_device_destroy);
	return retval;

out_undo:
	if (flags & O_EXCL) {
		sdp->exclude = false;	/* undo if error */
		wake_up_interruptible(&sdp->open_wait);
	}
error_mutex_locked:
	mutex_unlock(&sdp->open_rel_lock);
error_out:
	scsi_autopm_put_device(sdp->device);
sdp_put:
	scsi_device_put(sdp->device);
	goto sg_put;
}
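
/*
 * Illustrative note (not from the original source): open() flag semantics
 * implemented above, shown with a hypothetical node /dev/sg0:
 *
 *	open("/dev/sg0", O_RDWR);			// shared open
 *	open("/dev/sg0", O_RDWR | O_EXCL);		// waits until other opens go away
 *	open("/dev/sg0", O_RDONLY | O_EXCL);		// rejected with EPERM
 *	open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);	// fails instead of waiting
 *
 * O_NONBLOCK also bypasses the scsi_block_when_processing_errors() check,
 * so commands can be issued while error recovery is in progress.
 */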

/* Release resources associated with a successful sg_open()
 * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));

	mutex_lock(&sdp->open_rel_lock);
	scsi_autopm_put_device(sdp->device);
	kref_put(&sfp->f_ref, sg_remove_sfp);
	sdp->open_cnt--;

	/* possibly many open()s waiting on exclude clearing, start many;
	 * only open(O_EXCL)s wait on 0==open_cnt so only start one */
	if (sdp->exclude) {
		sdp->exclude = false;
		wake_up_interruptible_all(&sdp->open_wait);
	} else if (0 == sdp->open_cnt) {
		wake_up_interruptible(&sdp->open_wait);
	}
	mutex_unlock(&sdp->open_rel_lock);
	return 0;
}
415 sg_read(struct file
*filp
, char __user
*buf
, size_t count
, loff_t
* ppos
)
420 int req_pack_id
= -1;
422 struct sg_header
*old_hdr
= NULL
;
426 * This could cause a response to be stranded. Close the associated
427 * file descriptor to free up any resources being held.
429 retval
= sg_check_file_access(filp
, __func__
);
433 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
435 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
436 "sg_read: count=%d\n", (int) count
));
438 if (!access_ok(VERIFY_WRITE
, buf
, count
))
440 if (sfp
->force_packid
&& (count
>= SZ_SG_HEADER
)) {
441 old_hdr
= kmalloc(SZ_SG_HEADER
, GFP_KERNEL
);
444 if (__copy_from_user(old_hdr
, buf
, SZ_SG_HEADER
)) {
448 if (old_hdr
->reply_len
< 0) {
449 if (count
>= SZ_SG_IO_HDR
) {
450 sg_io_hdr_t
*new_hdr
;
451 new_hdr
= kmalloc(SZ_SG_IO_HDR
, GFP_KERNEL
);
456 retval
=__copy_from_user
457 (new_hdr
, buf
, SZ_SG_IO_HDR
);
458 req_pack_id
= new_hdr
->pack_id
;
466 req_pack_id
= old_hdr
->pack_id
;
468 srp
= sg_get_rq_mark(sfp
, req_pack_id
);
469 if (!srp
) { /* now wait on packet to arrive */
470 if (atomic_read(&sdp
->detaching
)) {
474 if (filp
->f_flags
& O_NONBLOCK
) {
478 retval
= wait_event_interruptible(sfp
->read_wait
,
479 (atomic_read(&sdp
->detaching
) ||
480 (srp
= sg_get_rq_mark(sfp
, req_pack_id
))));
481 if (atomic_read(&sdp
->detaching
)) {
486 /* -ERESTARTSYS as signal hit process */
490 if (srp
->header
.interface_id
!= '\0') {
491 retval
= sg_new_read(sfp
, buf
, count
, srp
);
496 if (old_hdr
== NULL
) {
497 old_hdr
= kmalloc(SZ_SG_HEADER
, GFP_KERNEL
);
503 memset(old_hdr
, 0, SZ_SG_HEADER
);
504 old_hdr
->reply_len
= (int) hp
->timeout
;
505 old_hdr
->pack_len
= old_hdr
->reply_len
; /* old, strange behaviour */
506 old_hdr
->pack_id
= hp
->pack_id
;
507 old_hdr
->twelve_byte
=
508 ((srp
->data
.cmd_opcode
>= 0xc0) && (12 == hp
->cmd_len
)) ? 1 : 0;
509 old_hdr
->target_status
= hp
->masked_status
;
510 old_hdr
->host_status
= hp
->host_status
;
511 old_hdr
->driver_status
= hp
->driver_status
;
512 if ((CHECK_CONDITION
& hp
->masked_status
) ||
513 (DRIVER_SENSE
& hp
->driver_status
))
514 memcpy(old_hdr
->sense_buffer
, srp
->sense_b
,
515 sizeof (old_hdr
->sense_buffer
));
516 switch (hp
->host_status
) {
517 /* This setup of 'result' is for backward compatibility and is best
518 ignored by the user who should use target, host + driver status */
520 case DID_PASSTHROUGH
:
527 old_hdr
->result
= EBUSY
;
534 old_hdr
->result
= EIO
;
537 old_hdr
->result
= (srp
->sense_b
[0] == 0 &&
538 hp
->masked_status
== GOOD
) ? 0 : EIO
;
541 old_hdr
->result
= EIO
;
545 /* Now copy the result back to the user buffer. */
546 if (count
>= SZ_SG_HEADER
) {
547 if (__copy_to_user(buf
, old_hdr
, SZ_SG_HEADER
)) {
552 if (count
> old_hdr
->reply_len
)
553 count
= old_hdr
->reply_len
;
554 if (count
> SZ_SG_HEADER
) {
555 if (sg_read_oxfer(srp
, buf
, count
- SZ_SG_HEADER
)) {
561 count
= (old_hdr
->result
== 0) ? 0 : -EIO
;
562 sg_finish_rem_req(srp
);
563 sg_remove_request(sfp
, srp
);
571 sg_new_read(Sg_fd
* sfp
, char __user
*buf
, size_t count
, Sg_request
* srp
)
573 sg_io_hdr_t
*hp
= &srp
->header
;
577 if (count
< SZ_SG_IO_HDR
) {
582 if ((hp
->mx_sb_len
> 0) && hp
->sbp
) {
583 if ((CHECK_CONDITION
& hp
->masked_status
) ||
584 (DRIVER_SENSE
& hp
->driver_status
)) {
585 int sb_len
= SCSI_SENSE_BUFFERSIZE
;
586 sb_len
= (hp
->mx_sb_len
> sb_len
) ? sb_len
: hp
->mx_sb_len
;
587 len
= 8 + (int) srp
->sense_b
[7]; /* Additional sense length field */
588 len
= (len
> sb_len
) ? sb_len
: len
;
589 if (copy_to_user(hp
->sbp
, srp
->sense_b
, len
)) {
596 if (hp
->masked_status
|| hp
->host_status
|| hp
->driver_status
)
597 hp
->info
|= SG_INFO_CHECK
;
598 if (copy_to_user(buf
, hp
, SZ_SG_IO_HDR
)) {
603 err2
= sg_finish_rem_req(srp
);
604 sg_remove_request(sfp
, srp
);
605 return err
? : err2
? : count
;
609 sg_write(struct file
*filp
, const char __user
*buf
, size_t count
, loff_t
* ppos
)
611 int mxsize
, cmd_size
, k
;
612 int input_size
, blocking
;
613 unsigned char opcode
;
617 struct sg_header old_hdr
;
619 unsigned char cmnd
[SG_MAX_CDB_SIZE
];
622 retval
= sg_check_file_access(filp
, __func__
);
626 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
628 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
629 "sg_write: count=%d\n", (int) count
));
630 if (atomic_read(&sdp
->detaching
))
632 if (!((filp
->f_flags
& O_NONBLOCK
) ||
633 scsi_block_when_processing_errors(sdp
->device
)))
636 if (!access_ok(VERIFY_READ
, buf
, count
))
637 return -EFAULT
; /* protects following copy_from_user()s + get_user()s */
638 if (count
< SZ_SG_HEADER
)
640 if (__copy_from_user(&old_hdr
, buf
, SZ_SG_HEADER
))
642 blocking
= !(filp
->f_flags
& O_NONBLOCK
);
643 if (old_hdr
.reply_len
< 0)
644 return sg_new_write(sfp
, filp
, buf
, count
,
645 blocking
, 0, 0, NULL
);
646 if (count
< (SZ_SG_HEADER
+ 6))
647 return -EIO
; /* The minimum scsi command length is 6 bytes. */
649 if (!(srp
= sg_add_request(sfp
))) {
650 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO
, sdp
,
651 "sg_write: queue full\n"));
655 __get_user(opcode
, buf
);
656 mutex_lock(&sfp
->f_mutex
);
657 if (sfp
->next_cmd_len
> 0) {
658 cmd_size
= sfp
->next_cmd_len
;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
661 cmd_size
= COMMAND_SIZE(opcode
); /* based on SCSI command group */
662 if ((opcode
>= 0xc0) && old_hdr
.twelve_byte
)
665 mutex_unlock(&sfp
->f_mutex
);
666 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sdp
,
667 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode
, cmd_size
));
668 /* Determine buffer size. */
669 input_size
= count
- cmd_size
;
670 mxsize
= (input_size
> old_hdr
.reply_len
) ? input_size
: old_hdr
.reply_len
;
671 mxsize
-= SZ_SG_HEADER
;
672 input_size
-= SZ_SG_HEADER
;
673 if (input_size
< 0) {
674 sg_remove_request(sfp
, srp
);
675 return -EIO
; /* User did not pass enough bytes for this command. */
678 hp
->interface_id
= '\0'; /* indicator of old interface tunnelled */
679 hp
->cmd_len
= (unsigned char) cmd_size
;
683 hp
->dxfer_direction
= (old_hdr
.reply_len
> SZ_SG_HEADER
) ?
684 SG_DXFER_TO_FROM_DEV
: SG_DXFER_TO_DEV
;
686 hp
->dxfer_direction
= (mxsize
> 0) ? SG_DXFER_FROM_DEV
: SG_DXFER_NONE
;
687 hp
->dxfer_len
= mxsize
;
688 if ((hp
->dxfer_direction
== SG_DXFER_TO_DEV
) ||
689 (hp
->dxfer_direction
== SG_DXFER_TO_FROM_DEV
))
690 hp
->dxferp
= (char __user
*)buf
+ cmd_size
;
694 hp
->timeout
= old_hdr
.reply_len
; /* structure abuse ... */
695 hp
->flags
= input_size
; /* structure abuse ... */
696 hp
->pack_id
= old_hdr
.pack_id
;
698 if (__copy_from_user(cmnd
, buf
, cmd_size
))
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
705 if (hp
->dxfer_direction
== SG_DXFER_TO_FROM_DEV
) {
706 printk_ratelimited(KERN_WARNING
707 "sg_write: data in/out %d/%d bytes "
708 "for SCSI command 0x%x-- guessing "
709 "data in;\n program %s not setting "
710 "count and/or reply_len properly\n",
711 old_hdr
.reply_len
- (int)SZ_SG_HEADER
,
712 input_size
, (unsigned int) cmnd
[0],
715 k
= sg_common_write(sfp
, srp
, cmnd
, sfp
->timeout
, blocking
);
716 return (k
< 0) ? k
: count
;
720 sg_new_write(Sg_fd
*sfp
, struct file
*file
, const char __user
*buf
,
721 size_t count
, int blocking
, int read_only
, int sg_io_owned
,
727 unsigned char cmnd
[SG_MAX_CDB_SIZE
];
729 unsigned long ul_timeout
;
731 if (count
< SZ_SG_IO_HDR
)
733 if (!access_ok(VERIFY_READ
, buf
, count
))
734 return -EFAULT
; /* protects following copy_from_user()s + get_user()s */
736 sfp
->cmd_q
= 1; /* when sg_io_hdr seen, set command queuing on */
737 if (!(srp
= sg_add_request(sfp
))) {
738 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO
, sfp
->parentdp
,
739 "sg_new_write: queue full\n"));
742 srp
->sg_io_owned
= sg_io_owned
;
744 if (__copy_from_user(hp
, buf
, SZ_SG_IO_HDR
)) {
745 sg_remove_request(sfp
, srp
);
748 if (hp
->interface_id
!= 'S') {
749 sg_remove_request(sfp
, srp
);
752 if (hp
->flags
& SG_FLAG_MMAP_IO
) {
753 if (hp
->dxfer_len
> sfp
->reserve
.bufflen
) {
754 sg_remove_request(sfp
, srp
);
755 return -ENOMEM
; /* MMAP_IO size must fit in reserve buffer */
757 if (hp
->flags
& SG_FLAG_DIRECT_IO
) {
758 sg_remove_request(sfp
, srp
);
759 return -EINVAL
; /* either MMAP_IO or DIRECT_IO (not both) */
761 if (sfp
->res_in_use
) {
762 sg_remove_request(sfp
, srp
);
763 return -EBUSY
; /* reserve buffer already being used */
766 ul_timeout
= msecs_to_jiffies(srp
->header
.timeout
);
767 timeout
= (ul_timeout
< INT_MAX
) ? ul_timeout
: INT_MAX
;
768 if ((!hp
->cmdp
) || (hp
->cmd_len
< 6) || (hp
->cmd_len
> sizeof (cmnd
))) {
769 sg_remove_request(sfp
, srp
);
772 if (!access_ok(VERIFY_READ
, hp
->cmdp
, hp
->cmd_len
)) {
773 sg_remove_request(sfp
, srp
);
774 return -EFAULT
; /* protects following copy_from_user()s + get_user()s */
776 if (__copy_from_user(cmnd
, hp
->cmdp
, hp
->cmd_len
)) {
777 sg_remove_request(sfp
, srp
);
780 if (read_only
&& sg_allow_access(file
, cmnd
)) {
781 sg_remove_request(sfp
, srp
);
784 k
= sg_common_write(sfp
, srp
, cmnd
, timeout
, blocking
);
793 sg_common_write(Sg_fd
* sfp
, Sg_request
* srp
,
794 unsigned char *cmnd
, int timeout
, int blocking
)
797 Sg_device
*sdp
= sfp
->parentdp
;
798 sg_io_hdr_t
*hp
= &srp
->header
;
800 srp
->data
.cmd_opcode
= cmnd
[0]; /* hold opcode of command */
802 hp
->masked_status
= 0;
806 hp
->driver_status
= 0;
808 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
809 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
810 (int) cmnd
[0], (int) hp
->cmd_len
));
812 if (hp
->dxfer_len
>= SZ_256M
)
815 k
= sg_start_req(srp
, cmnd
);
817 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO
, sfp
->parentdp
,
818 "sg_common_write: start_req err=%d\n", k
));
819 sg_finish_rem_req(srp
);
820 sg_remove_request(sfp
, srp
);
821 return k
; /* probably out of space --> ENOMEM */
823 if (atomic_read(&sdp
->detaching
)) {
825 scsi_req_free_cmd(scsi_req(srp
->rq
));
826 blk_end_request_all(srp
->rq
, BLK_STS_IOERR
);
830 sg_finish_rem_req(srp
);
831 sg_remove_request(sfp
, srp
);
835 hp
->duration
= jiffies_to_msecs(jiffies
);
836 if (hp
->interface_id
!= '\0' && /* v3 (or later) interface */
837 (SG_FLAG_Q_AT_TAIL
& hp
->flags
))
842 srp
->rq
->timeout
= timeout
;
843 kref_get(&sfp
->f_ref
); /* sg_rq_end_io() does kref_put(). */
844 blk_execute_rq_nowait(sdp
->device
->request_queue
, sdp
->disk
,
845 srp
->rq
, at_head
, sg_rq_end_io
);
849 static int srp_done(Sg_fd
*sfp
, Sg_request
*srp
)
854 read_lock_irqsave(&sfp
->rq_list_lock
, flags
);
856 read_unlock_irqrestore(&sfp
->rq_list_lock
, flags
);
860 static int max_sectors_bytes(struct request_queue
*q
)
862 unsigned int max_sectors
= queue_max_sectors(q
);
864 max_sectors
= min_t(unsigned int, max_sectors
, INT_MAX
>> 9);
866 return max_sectors
<< 9;
870 sg_fill_request_table(Sg_fd
*sfp
, sg_req_info_t
*rinfo
)
877 list_for_each_entry(srp
, &sfp
->rq_list
, entry
) {
878 if (val
>= SG_MAX_QUEUE
)
880 rinfo
[val
].req_state
= srp
->done
+ 1;
882 srp
->header
.masked_status
&
883 srp
->header
.host_status
&
884 srp
->header
.driver_status
;
886 rinfo
[val
].duration
=
887 srp
->header
.duration
;
889 ms
= jiffies_to_msecs(jiffies
);
890 rinfo
[val
].duration
=
891 (ms
> srp
->header
.duration
) ?
892 (ms
- srp
->header
.duration
) : 0;
894 rinfo
[val
].orphan
= srp
->orphan
;
895 rinfo
[val
].sg_io_owned
= srp
->sg_io_owned
;
896 rinfo
[val
].pack_id
= srp
->header
.pack_id
;
897 rinfo
[val
].usr_ptr
= srp
->header
.usr_ptr
;
903 sg_ioctl(struct file
*filp
, unsigned int cmd_in
, unsigned long arg
)
905 void __user
*p
= (void __user
*)arg
;
907 int result
, val
, read_only
;
911 unsigned long iflags
;
913 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
916 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
917 "sg_ioctl: cmd=0x%x\n", (int) cmd_in
));
918 read_only
= (O_RDWR
!= (filp
->f_flags
& O_ACCMODE
));
922 if (atomic_read(&sdp
->detaching
))
924 if (!scsi_block_when_processing_errors(sdp
->device
))
926 if (!access_ok(VERIFY_WRITE
, p
, SZ_SG_IO_HDR
))
928 result
= sg_new_write(sfp
, filp
, p
, SZ_SG_IO_HDR
,
929 1, read_only
, 1, &srp
);
932 result
= wait_event_interruptible(sfp
->read_wait
,
933 (srp_done(sfp
, srp
) || atomic_read(&sdp
->detaching
)));
934 if (atomic_read(&sdp
->detaching
))
936 write_lock_irq(&sfp
->rq_list_lock
);
939 write_unlock_irq(&sfp
->rq_list_lock
);
940 result
= sg_new_read(sfp
, p
, SZ_SG_IO_HDR
, srp
);
941 return (result
< 0) ? result
: 0;
944 write_unlock_irq(&sfp
->rq_list_lock
);
945 return result
; /* -ERESTARTSYS because signal hit process */
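	/*
	 * Illustrative sketch (user-space, not part of this driver): the
	 * synchronous SG_IO path handled above is normally driven like
	 * this, with "/dev/sg0" and the CDB details being hypothetical:
	 *
	 *	sg_io_hdr_t io = { .interface_id = 'S', .cmd_len = 6,
	 *			   .dxfer_direction = SG_DXFER_FROM_DEV, ... };
	 *	int fd = open("/dev/sg0", O_RDWR);
	 *	ioctl(fd, SG_IO, &io);		// blocks until the command completes
	 *
	 * The same sg_io_hdr layout is what sg_new_write()/sg_new_read()
	 * copy across the user/kernel boundary here.
	 */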
947 result
= get_user(val
, ip
);
952 if (val
>= mult_frac((s64
)INT_MAX
, USER_HZ
, HZ
))
953 val
= min_t(s64
, mult_frac((s64
)INT_MAX
, USER_HZ
, HZ
),
955 sfp
->timeout_user
= val
;
956 sfp
->timeout
= mult_frac(val
, HZ
, USER_HZ
);
959 case SG_GET_TIMEOUT
: /* N.B. User receives timeout as return value */
960 /* strange ..., for backward compatibility */
961 return sfp
->timeout_user
;
962 case SG_SET_FORCE_LOW_DMA
:
		 * N.B. This ioctl never worked properly, but failed to
		 * return an error value. So returning '0' to keep compatibility
		 * with legacy applications.
970 return put_user((int) sdp
->device
->host
->unchecked_isa_dma
, ip
);
972 if (!access_ok(VERIFY_WRITE
, p
, sizeof (sg_scsi_id_t
)))
975 sg_scsi_id_t __user
*sg_idp
= p
;
977 if (atomic_read(&sdp
->detaching
))
979 __put_user((int) sdp
->device
->host
->host_no
,
981 __put_user((int) sdp
->device
->channel
,
983 __put_user((int) sdp
->device
->id
, &sg_idp
->scsi_id
);
984 __put_user((int) sdp
->device
->lun
, &sg_idp
->lun
);
985 __put_user((int) sdp
->device
->type
, &sg_idp
->scsi_type
);
986 __put_user((short) sdp
->device
->host
->cmd_per_lun
,
987 &sg_idp
->h_cmd_per_lun
);
988 __put_user((short) sdp
->device
->queue_depth
,
989 &sg_idp
->d_queue_depth
);
990 __put_user(0, &sg_idp
->unused
[0]);
991 __put_user(0, &sg_idp
->unused
[1]);
994 case SG_SET_FORCE_PACK_ID
:
995 result
= get_user(val
, ip
);
998 sfp
->force_packid
= val
? 1 : 0;
1000 case SG_GET_PACK_ID
:
1001 if (!access_ok(VERIFY_WRITE
, ip
, sizeof (int)))
1003 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1004 list_for_each_entry(srp
, &sfp
->rq_list
, entry
) {
1005 if ((1 == srp
->done
) && (!srp
->sg_io_owned
)) {
1006 read_unlock_irqrestore(&sfp
->rq_list_lock
,
1008 __put_user(srp
->header
.pack_id
, ip
);
1012 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1015 case SG_GET_NUM_WAITING
:
1016 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1018 list_for_each_entry(srp
, &sfp
->rq_list
, entry
) {
1019 if ((1 == srp
->done
) && (!srp
->sg_io_owned
))
1022 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1023 return put_user(val
, ip
);
1024 case SG_GET_SG_TABLESIZE
:
1025 return put_user(sdp
->sg_tablesize
, ip
);
1026 case SG_SET_RESERVED_SIZE
:
1027 result
= get_user(val
, ip
);
1032 val
= min_t(int, val
,
1033 max_sectors_bytes(sdp
->device
->request_queue
));
1034 mutex_lock(&sfp
->f_mutex
);
1035 if (val
!= sfp
->reserve
.bufflen
) {
1036 if (sfp
->mmap_called
||
1038 mutex_unlock(&sfp
->f_mutex
);
1042 sg_remove_scat(sfp
, &sfp
->reserve
);
1043 sg_build_reserve(sfp
, val
);
1045 mutex_unlock(&sfp
->f_mutex
);
1047 case SG_GET_RESERVED_SIZE
:
1048 val
= min_t(int, sfp
->reserve
.bufflen
,
1049 max_sectors_bytes(sdp
->device
->request_queue
));
1050 return put_user(val
, ip
);
1051 case SG_SET_COMMAND_Q
:
1052 result
= get_user(val
, ip
);
1055 sfp
->cmd_q
= val
? 1 : 0;
1057 case SG_GET_COMMAND_Q
:
1058 return put_user((int) sfp
->cmd_q
, ip
);
1059 case SG_SET_KEEP_ORPHAN
:
1060 result
= get_user(val
, ip
);
1063 sfp
->keep_orphan
= val
;
1065 case SG_GET_KEEP_ORPHAN
:
1066 return put_user((int) sfp
->keep_orphan
, ip
);
1067 case SG_NEXT_CMD_LEN
:
1068 result
= get_user(val
, ip
);
1071 if (val
> SG_MAX_CDB_SIZE
)
1073 sfp
->next_cmd_len
= (val
> 0) ? val
: 0;
1075 case SG_GET_VERSION_NUM
:
1076 return put_user(sg_version_num
, ip
);
1077 case SG_GET_ACCESS_COUNT
:
1078 /* faked - we don't have a real access count anymore */
1079 val
= (sdp
->device
? 1 : 0);
1080 return put_user(val
, ip
);
1081 case SG_GET_REQUEST_TABLE
:
1082 if (!access_ok(VERIFY_WRITE
, p
, SZ_SG_REQ_INFO
* SG_MAX_QUEUE
))
1085 sg_req_info_t
*rinfo
;
1087 rinfo
= kzalloc(SZ_SG_REQ_INFO
* SG_MAX_QUEUE
,
1091 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1092 sg_fill_request_table(sfp
, rinfo
);
1093 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1094 result
= __copy_to_user(p
, rinfo
,
1095 SZ_SG_REQ_INFO
* SG_MAX_QUEUE
);
1096 result
= result
? -EFAULT
: 0;
1100 case SG_EMULATED_HOST
:
1101 if (atomic_read(&sdp
->detaching
))
1103 return put_user(sdp
->device
->host
->hostt
->emulated
, ip
);
1104 case SCSI_IOCTL_SEND_COMMAND
:
1105 if (atomic_read(&sdp
->detaching
))
1108 unsigned char opcode
= WRITE_6
;
1109 Scsi_Ioctl_Command __user
*siocp
= p
;
1111 if (copy_from_user(&opcode
, siocp
->data
, 1))
1113 if (sg_allow_access(filp
, &opcode
))
1116 return sg_scsi_ioctl(sdp
->device
->request_queue
, NULL
, filp
->f_mode
, p
);
1118 result
= get_user(val
, ip
);
1121 sdp
->sgdebug
= (char) val
;
1124 return put_user(max_sectors_bytes(sdp
->device
->request_queue
),
1127 return blk_trace_setup(sdp
->device
->request_queue
,
1128 sdp
->disk
->disk_name
,
1129 MKDEV(SCSI_GENERIC_MAJOR
, sdp
->index
),
1132 return blk_trace_startstop(sdp
->device
->request_queue
, 1);
1134 return blk_trace_startstop(sdp
->device
->request_queue
, 0);
1135 case BLKTRACETEARDOWN
:
1136 return blk_trace_remove(sdp
->device
->request_queue
);
1137 case SCSI_IOCTL_GET_IDLUN
:
1138 case SCSI_IOCTL_GET_BUS_NUMBER
:
1139 case SCSI_IOCTL_PROBE_HOST
:
1140 case SG_GET_TRANSFORM
:
1142 if (atomic_read(&sdp
->detaching
))
1147 return -EPERM
; /* don't know so take safe approach */
1151 result
= scsi_ioctl_block_when_processing_errors(sdp
->device
,
1152 cmd_in
, filp
->f_flags
& O_NDELAY
);
1155 return scsi_ioctl(sdp
->device
, cmd_in
, p
);
1158 #ifdef CONFIG_COMPAT
1159 static long sg_compat_ioctl(struct file
*filp
, unsigned int cmd_in
, unsigned long arg
)
1163 struct scsi_device
*sdev
;
1165 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
1169 if (sdev
->host
->hostt
->compat_ioctl
) {
1172 ret
= sdev
->host
->hostt
->compat_ioctl(sdev
, cmd_in
, (void __user
*)arg
);
1177 return -ENOIOCTLCMD
;
1182 sg_poll(struct file
*filp
, poll_table
* wait
)
1184 unsigned int res
= 0;
1189 unsigned long iflags
;
1191 sfp
= filp
->private_data
;
1194 sdp
= sfp
->parentdp
;
1197 poll_wait(filp
, &sfp
->read_wait
, wait
);
1198 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1199 list_for_each_entry(srp
, &sfp
->rq_list
, entry
) {
1200 /* if any read waiting, flag it */
1201 if ((0 == res
) && (1 == srp
->done
) && (!srp
->sg_io_owned
))
1202 res
= POLLIN
| POLLRDNORM
;
1205 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1207 if (atomic_read(&sdp
->detaching
))
1209 else if (!sfp
->cmd_q
) {
1211 res
|= POLLOUT
| POLLWRNORM
;
1212 } else if (count
< SG_MAX_QUEUE
)
1213 res
|= POLLOUT
| POLLWRNORM
;
1214 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
1215 "sg_poll: res=0x%x\n", (int) res
));
1220 sg_fasync(int fd
, struct file
*filp
, int mode
)
1225 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
1227 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
1228 "sg_fasync: mode=%d\n", mode
));
1230 return fasync_helper(fd
, filp
, mode
, &sfp
->async_qp
);
1234 sg_vma_fault(struct vm_fault
*vmf
)
1236 struct vm_area_struct
*vma
= vmf
->vma
;
1238 unsigned long offset
, len
, sa
;
1239 Sg_scatter_hold
*rsv_schp
;
1242 if ((NULL
== vma
) || (!(sfp
= (Sg_fd
*) vma
->vm_private_data
)))
1243 return VM_FAULT_SIGBUS
;
1244 rsv_schp
= &sfp
->reserve
;
1245 offset
= vmf
->pgoff
<< PAGE_SHIFT
;
1246 if (offset
>= rsv_schp
->bufflen
)
1247 return VM_FAULT_SIGBUS
;
1248 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sfp
->parentdp
,
1249 "sg_vma_fault: offset=%lu, scatg=%d\n",
1250 offset
, rsv_schp
->k_use_sg
));
1252 length
= 1 << (PAGE_SHIFT
+ rsv_schp
->page_order
);
1253 for (k
= 0; k
< rsv_schp
->k_use_sg
&& sa
< vma
->vm_end
; k
++) {
1254 len
= vma
->vm_end
- sa
;
1255 len
= (len
< length
) ? len
: length
;
1257 struct page
*page
= nth_page(rsv_schp
->pages
[k
],
1258 offset
>> PAGE_SHIFT
);
1259 get_page(page
); /* increment page count */
1261 return 0; /* success */
1267 return VM_FAULT_SIGBUS
;
1270 static const struct vm_operations_struct sg_mmap_vm_ops
= {
1271 .fault
= sg_vma_fault
,
1275 sg_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
1278 unsigned long req_sz
, len
, sa
;
1279 Sg_scatter_hold
*rsv_schp
;
1283 if ((!filp
) || (!vma
) || (!(sfp
= (Sg_fd
*) filp
->private_data
)))
1285 req_sz
= vma
->vm_end
- vma
->vm_start
;
1286 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sfp
->parentdp
,
1287 "sg_mmap starting, vm_start=%p, len=%d\n",
1288 (void *) vma
->vm_start
, (int) req_sz
));
1290 return -EINVAL
; /* want no offset */
1291 rsv_schp
= &sfp
->reserve
;
1292 mutex_lock(&sfp
->f_mutex
);
1293 if (req_sz
> rsv_schp
->bufflen
) {
1294 ret
= -ENOMEM
; /* cannot map more than reserved buffer */
1299 length
= 1 << (PAGE_SHIFT
+ rsv_schp
->page_order
);
1300 for (k
= 0; k
< rsv_schp
->k_use_sg
&& sa
< vma
->vm_end
; k
++) {
1301 len
= vma
->vm_end
- sa
;
1302 len
= (len
< length
) ? len
: length
;
1306 sfp
->mmap_called
= 1;
1307 vma
->vm_flags
|= VM_IO
| VM_DONTEXPAND
| VM_DONTDUMP
;
1308 vma
->vm_private_data
= sfp
;
1309 vma
->vm_ops
= &sg_mmap_vm_ops
;
1311 mutex_unlock(&sfp
->f_mutex
);
1316 sg_rq_end_io_usercontext(struct work_struct
*work
)
1318 struct sg_request
*srp
= container_of(work
, struct sg_request
, ew
.work
);
1319 struct sg_fd
*sfp
= srp
->parentfp
;
1321 sg_finish_rem_req(srp
);
1322 sg_remove_request(sfp
, srp
);
1323 kref_put(&sfp
->f_ref
, sg_remove_sfp
);
1327 * This function is a "bottom half" handler that is called by the mid
1328 * level when a command is completed (or has failed).
1331 sg_rq_end_io(struct request
*rq
, blk_status_t status
)
1333 struct sg_request
*srp
= rq
->end_io_data
;
1334 struct scsi_request
*req
= scsi_req(rq
);
1337 unsigned long iflags
;
1340 int result
, resid
, done
= 1;
1342 if (WARN_ON(srp
->done
!= 0))
1345 sfp
= srp
->parentfp
;
1346 if (WARN_ON(sfp
== NULL
))
1349 sdp
= sfp
->parentdp
;
1350 if (unlikely(atomic_read(&sdp
->detaching
)))
1351 pr_info("%s: device detaching\n", __func__
);
1354 result
= req
->result
;
1355 resid
= req
->resid_len
;
1357 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sdp
,
1358 "sg_cmd_done: pack_id=%d, res=0x%x\n",
1359 srp
->header
.pack_id
, result
));
1360 srp
->header
.resid
= resid
;
1361 ms
= jiffies_to_msecs(jiffies
);
1362 srp
->header
.duration
= (ms
> srp
->header
.duration
) ?
1363 (ms
- srp
->header
.duration
) : 0;
1365 struct scsi_sense_hdr sshdr
;
1367 srp
->header
.status
= 0xff & result
;
1368 srp
->header
.masked_status
= status_byte(result
);
1369 srp
->header
.msg_status
= msg_byte(result
);
1370 srp
->header
.host_status
= host_byte(result
);
1371 srp
->header
.driver_status
= driver_byte(result
);
1372 if ((sdp
->sgdebug
> 0) &&
1373 ((CHECK_CONDITION
== srp
->header
.masked_status
) ||
1374 (COMMAND_TERMINATED
== srp
->header
.masked_status
)))
1375 __scsi_print_sense(sdp
->device
, __func__
, sense
,
1376 SCSI_SENSE_BUFFERSIZE
);
1378 /* Following if statement is a patch supplied by Eric Youngdale */
1379 if (driver_byte(result
) != 0
1380 && scsi_normalize_sense(sense
, SCSI_SENSE_BUFFERSIZE
, &sshdr
)
1381 && !scsi_sense_is_deferred(&sshdr
)
1382 && sshdr
.sense_key
== UNIT_ATTENTION
1383 && sdp
->device
->removable
) {
1384 /* Detected possible disc change. Set the bit - this */
1385 /* may be used if there are filesystems using this device */
1386 sdp
->device
->changed
= 1;
1391 memcpy(srp
->sense_b
, req
->sense
, SCSI_SENSE_BUFFERSIZE
);
1393 /* Rely on write phase to clean out srp status values, so no "else" */
1396 * Free the request as soon as it is complete so that its resources
1397 * can be reused without waiting for userspace to read() the
1398 * result. But keep the associated bio (if any) around until
1399 * blk_rq_unmap_user() can be called from user context.
1402 scsi_req_free_cmd(scsi_req(rq
));
1403 __blk_put_request(rq
->q
, rq
);
1405 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1406 if (unlikely(srp
->orphan
)) {
1407 if (sfp
->keep_orphan
)
1408 srp
->sg_io_owned
= 0;
1413 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1416 /* Now wake up any sg_read() that is waiting for this
1419 wake_up_interruptible(&sfp
->read_wait
);
1420 kill_fasync(&sfp
->async_qp
, SIGPOLL
, POLL_IN
);
1421 kref_put(&sfp
->f_ref
, sg_remove_sfp
);
1423 INIT_WORK(&srp
->ew
.work
, sg_rq_end_io_usercontext
);
1424 schedule_work(&srp
->ew
.work
);
1428 static const struct file_operations sg_fops
= {
1429 .owner
= THIS_MODULE
,
1433 .unlocked_ioctl
= sg_ioctl
,
1434 #ifdef CONFIG_COMPAT
1435 .compat_ioctl
= sg_compat_ioctl
,
1439 .release
= sg_release
,
1440 .fasync
= sg_fasync
,
1441 .llseek
= no_llseek
,
1444 static struct class *sg_sysfs_class
;
1446 static int sg_sysfs_valid
= 0;
1449 sg_alloc(struct gendisk
*disk
, struct scsi_device
*scsidp
)
1451 struct request_queue
*q
= scsidp
->request_queue
;
1453 unsigned long iflags
;
1457 sdp
= kzalloc(sizeof(Sg_device
), GFP_KERNEL
);
1459 sdev_printk(KERN_WARNING
, scsidp
, "%s: kmalloc Sg_device "
1460 "failure\n", __func__
);
1461 return ERR_PTR(-ENOMEM
);
1464 idr_preload(GFP_KERNEL
);
1465 write_lock_irqsave(&sg_index_lock
, iflags
);
1467 error
= idr_alloc(&sg_index_idr
, sdp
, 0, SG_MAX_DEVS
, GFP_NOWAIT
);
1469 if (error
== -ENOSPC
) {
1470 sdev_printk(KERN_WARNING
, scsidp
,
1471 "Unable to attach sg device type=%d, minor number exceeds %d\n",
1472 scsidp
->type
, SG_MAX_DEVS
- 1);
1475 sdev_printk(KERN_WARNING
, scsidp
, "%s: idr "
1476 "allocation Sg_device failure: %d\n",
1483 SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO
, scsidp
,
1484 "sg_alloc: dev=%d \n", k
));
1485 sprintf(disk
->disk_name
, "sg%d", k
);
1486 disk
->first_minor
= k
;
1488 sdp
->device
= scsidp
;
1489 mutex_init(&sdp
->open_rel_lock
);
1490 INIT_LIST_HEAD(&sdp
->sfds
);
1491 init_waitqueue_head(&sdp
->open_wait
);
1492 atomic_set(&sdp
->detaching
, 0);
1493 rwlock_init(&sdp
->sfd_lock
);
1494 sdp
->sg_tablesize
= queue_max_segments(q
);
1496 kref_init(&sdp
->d_ref
);
1500 write_unlock_irqrestore(&sg_index_lock
, iflags
);
1505 return ERR_PTR(error
);
1511 sg_add_device(struct device
*cl_dev
, struct class_interface
*cl_intf
)
1513 struct scsi_device
*scsidp
= to_scsi_device(cl_dev
->parent
);
1514 struct gendisk
*disk
;
1515 Sg_device
*sdp
= NULL
;
1516 struct cdev
* cdev
= NULL
;
1518 unsigned long iflags
;
1520 disk
= alloc_disk(1);
1522 pr_warn("%s: alloc_disk failed\n", __func__
);
1525 disk
->major
= SCSI_GENERIC_MAJOR
;
1528 cdev
= cdev_alloc();
1530 pr_warn("%s: cdev_alloc failed\n", __func__
);
1533 cdev
->owner
= THIS_MODULE
;
1534 cdev
->ops
= &sg_fops
;
1536 sdp
= sg_alloc(disk
, scsidp
);
1538 pr_warn("%s: sg_alloc failed\n", __func__
);
1539 error
= PTR_ERR(sdp
);
1543 error
= cdev_add(cdev
, MKDEV(SCSI_GENERIC_MAJOR
, sdp
->index
), 1);
1548 if (sg_sysfs_valid
) {
1549 struct device
*sg_class_member
;
1551 sg_class_member
= device_create(sg_sysfs_class
, cl_dev
->parent
,
1552 MKDEV(SCSI_GENERIC_MAJOR
,
1554 sdp
, "%s", disk
->disk_name
);
1555 if (IS_ERR(sg_class_member
)) {
1556 pr_err("%s: device_create failed\n", __func__
);
1557 error
= PTR_ERR(sg_class_member
);
1560 error
= sysfs_create_link(&scsidp
->sdev_gendev
.kobj
,
1561 &sg_class_member
->kobj
, "generic");
1563 pr_err("%s: unable to make symlink 'generic' back "
1564 "to sg%d\n", __func__
, sdp
->index
);
1566 pr_warn("%s: sg_sys Invalid\n", __func__
);
1568 sdev_printk(KERN_NOTICE
, scsidp
, "Attached scsi generic sg%d "
1569 "type %d\n", sdp
->index
, scsidp
->type
);
1571 dev_set_drvdata(cl_dev
, sdp
);
1576 write_lock_irqsave(&sg_index_lock
, iflags
);
1577 idr_remove(&sg_index_idr
, sdp
->index
);
1578 write_unlock_irqrestore(&sg_index_lock
, iflags
);
1589 sg_device_destroy(struct kref
*kref
)
1591 struct sg_device
*sdp
= container_of(kref
, struct sg_device
, d_ref
);
1592 unsigned long flags
;
1594 /* CAUTION! Note that the device can still be found via idr_find()
1595 * even though the refcount is 0. Therefore, do idr_remove() BEFORE
1596 * any other cleanup.
1599 write_lock_irqsave(&sg_index_lock
, flags
);
1600 idr_remove(&sg_index_idr
, sdp
->index
);
1601 write_unlock_irqrestore(&sg_index_lock
, flags
);
1604 sg_printk(KERN_INFO
, sdp
, "sg_device_destroy\n"));
1606 put_disk(sdp
->disk
);
1611 sg_remove_device(struct device
*cl_dev
, struct class_interface
*cl_intf
)
1613 struct scsi_device
*scsidp
= to_scsi_device(cl_dev
->parent
);
1614 Sg_device
*sdp
= dev_get_drvdata(cl_dev
);
1615 unsigned long iflags
;
1621 /* want sdp->detaching non-zero as soon as possible */
1622 val
= atomic_inc_return(&sdp
->detaching
);
1624 return; /* only want to do following once per device */
1626 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
1629 read_lock_irqsave(&sdp
->sfd_lock
, iflags
);
1630 list_for_each_entry(sfp
, &sdp
->sfds
, sfd_siblings
) {
1631 wake_up_interruptible_all(&sfp
->read_wait
);
1632 kill_fasync(&sfp
->async_qp
, SIGPOLL
, POLL_HUP
);
1634 wake_up_interruptible_all(&sdp
->open_wait
);
1635 read_unlock_irqrestore(&sdp
->sfd_lock
, iflags
);
1637 sysfs_remove_link(&scsidp
->sdev_gendev
.kobj
, "generic");
1638 device_destroy(sg_sysfs_class
, MKDEV(SCSI_GENERIC_MAJOR
, sdp
->index
));
1639 cdev_del(sdp
->cdev
);
1642 kref_put(&sdp
->d_ref
, sg_device_destroy
);
1645 module_param_named(scatter_elem_sz
, scatter_elem_sz
, int, S_IRUGO
| S_IWUSR
);
1646 module_param_named(def_reserved_size
, def_reserved_size
, int,
1648 module_param_named(allow_dio
, sg_allow_dio
, int, S_IRUGO
| S_IWUSR
);
1650 MODULE_AUTHOR("Douglas Gilbert");
1651 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1652 MODULE_LICENSE("GPL");
1653 MODULE_VERSION(SG_VERSION_STR
);
1654 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR
);
1656 MODULE_PARM_DESC(scatter_elem_sz
, "scatter gather element "
1657 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1658 MODULE_PARM_DESC(def_reserved_size
, "size of buffer reserved for each fd");
1659 MODULE_PARM_DESC(allow_dio
, "allow direct I/O (default: 0 (disallow))");
1666 if (scatter_elem_sz
< PAGE_SIZE
) {
1667 scatter_elem_sz
= PAGE_SIZE
;
1668 scatter_elem_sz_prev
= scatter_elem_sz
;
1670 if (def_reserved_size
>= 0)
1671 sg_big_buff
= def_reserved_size
;
1673 def_reserved_size
= sg_big_buff
;
1675 rc
= register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR
, 0),
1679 sg_sysfs_class
= class_create(THIS_MODULE
, "scsi_generic");
1680 if ( IS_ERR(sg_sysfs_class
) ) {
1681 rc
= PTR_ERR(sg_sysfs_class
);
1685 rc
= scsi_register_interface(&sg_interface
);
1687 #ifdef CONFIG_SCSI_PROC_FS
1689 #endif /* CONFIG_SCSI_PROC_FS */
1692 class_destroy(sg_sysfs_class
);
1694 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR
, 0), SG_MAX_DEVS
);
1701 #ifdef CONFIG_SCSI_PROC_FS
1703 #endif /* CONFIG_SCSI_PROC_FS */
1704 scsi_unregister_interface(&sg_interface
);
1705 class_destroy(sg_sysfs_class
);
1707 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR
, 0),
1709 idr_destroy(&sg_index_idr
);
1713 sg_start_req(Sg_request
*srp
, unsigned char *cmd
)
1717 struct scsi_request
*req
;
1718 Sg_fd
*sfp
= srp
->parentfp
;
1719 sg_io_hdr_t
*hp
= &srp
->header
;
1720 int dxfer_len
= (int) hp
->dxfer_len
;
1721 int dxfer_dir
= hp
->dxfer_direction
;
1722 unsigned int iov_count
= hp
->iovec_count
;
1723 Sg_scatter_hold
*req_schp
= &srp
->data
;
1724 Sg_scatter_hold
*rsv_schp
= &sfp
->reserve
;
1725 struct request_queue
*q
= sfp
->parentdp
->device
->request_queue
;
1726 struct rq_map_data
*md
, map_data
;
1727 int rw
= hp
->dxfer_direction
== SG_DXFER_TO_DEV
? WRITE
: READ
;
1728 unsigned char *long_cmdp
= NULL
;
1730 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
1731 "sg_start_req: dxfer_len=%d\n",
1734 if (hp
->cmd_len
> BLK_MAX_CDB
) {
1735 long_cmdp
= kzalloc(hp
->cmd_len
, GFP_KERNEL
);
	 * With scsi-mq enabled, there are a fixed number of preallocated
	 * requests equal in number to shost->can_queue. If all of the
	 * preallocated requests are already in use, then using GFP_ATOMIC with
	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
	 * will cause blk_get_request() to sleep until an active command
	 * completes, freeing up a request. Neither option is ideal, but
	 * GFP_KERNEL is the better choice to prevent userspace from getting an
	 * unexpected EWOULDBLOCK.
	 *
	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
	 * does not sleep except under memory pressure.
1755 rq
= blk_get_request(q
, hp
->dxfer_direction
== SG_DXFER_TO_DEV
?
1756 REQ_OP_SCSI_OUT
: REQ_OP_SCSI_IN
, GFP_KERNEL
);
1763 if (hp
->cmd_len
> BLK_MAX_CDB
)
1764 req
->cmd
= long_cmdp
;
1765 memcpy(req
->cmd
, cmd
, hp
->cmd_len
);
1766 req
->cmd_len
= hp
->cmd_len
;
1769 rq
->end_io_data
= srp
;
1770 req
->retries
= SG_DEFAULT_RETRIES
;
1772 if ((dxfer_len
<= 0) || (dxfer_dir
== SG_DXFER_NONE
))
1775 if (sg_allow_dio
&& hp
->flags
& SG_FLAG_DIRECT_IO
&&
1776 dxfer_dir
!= SG_DXFER_UNKNOWN
&& !iov_count
&&
1777 !sfp
->parentdp
->device
->host
->unchecked_isa_dma
&&
1778 blk_rq_aligned(q
, (unsigned long)hp
->dxferp
, dxfer_len
))
1784 mutex_lock(&sfp
->f_mutex
);
1785 if (dxfer_len
<= rsv_schp
->bufflen
&&
1787 sfp
->res_in_use
= 1;
1788 sg_link_reserve(sfp
, srp
, dxfer_len
);
1789 } else if (hp
->flags
& SG_FLAG_MMAP_IO
) {
1790 res
= -EBUSY
; /* sfp->res_in_use == 1 */
1791 if (dxfer_len
> rsv_schp
->bufflen
)
1793 mutex_unlock(&sfp
->f_mutex
);
1796 res
= sg_build_indirect(req_schp
, sfp
, dxfer_len
);
1798 mutex_unlock(&sfp
->f_mutex
);
1802 mutex_unlock(&sfp
->f_mutex
);
1804 md
->pages
= req_schp
->pages
;
1805 md
->page_order
= req_schp
->page_order
;
1806 md
->nr_entries
= req_schp
->k_use_sg
;
1808 md
->null_mapped
= hp
->dxferp
? 0 : 1;
1809 if (dxfer_dir
== SG_DXFER_TO_FROM_DEV
)
1816 struct iovec
*iov
= NULL
;
1819 res
= import_iovec(rw
, hp
->dxferp
, iov_count
, 0, &iov
, &i
);
1823 iov_iter_truncate(&i
, hp
->dxfer_len
);
1824 if (!iov_iter_count(&i
)) {
1829 res
= blk_rq_map_user_iov(q
, rq
, md
, &i
, GFP_ATOMIC
);
1832 res
= blk_rq_map_user(q
, rq
, md
, hp
->dxferp
,
1833 hp
->dxfer_len
, GFP_ATOMIC
);
1839 req_schp
->dio_in_use
= 1;
1840 hp
->info
|= SG_INFO_DIRECT_IO
;
1847 sg_finish_rem_req(Sg_request
*srp
)
1851 Sg_fd
*sfp
= srp
->parentfp
;
1852 Sg_scatter_hold
*req_schp
= &srp
->data
;
1854 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
1855 "sg_finish_rem_req: res_used=%d\n",
1856 (int) srp
->res_used
));
1858 ret
= blk_rq_unmap_user(srp
->bio
);
1861 scsi_req_free_cmd(scsi_req(srp
->rq
));
1862 blk_put_request(srp
->rq
);
1866 sg_unlink_reserve(sfp
, srp
);
1868 sg_remove_scat(sfp
, req_schp
);
1874 sg_build_sgat(Sg_scatter_hold
* schp
, const Sg_fd
* sfp
, int tablesize
)
1876 int sg_bufflen
= tablesize
* sizeof(struct page
*);
1877 gfp_t gfp_flags
= GFP_ATOMIC
| __GFP_NOWARN
;
1879 schp
->pages
= kzalloc(sg_bufflen
, gfp_flags
);
1882 schp
->sglist_len
= sg_bufflen
;
1883 return tablesize
; /* number of scat_gath elements allocated */
1887 sg_build_indirect(Sg_scatter_hold
* schp
, Sg_fd
* sfp
, int buff_size
)
1889 int ret_sz
= 0, i
, k
, rem_sz
, num
, mx_sc_elems
;
1890 int sg_tablesize
= sfp
->parentdp
->sg_tablesize
;
1891 int blk_size
= buff_size
, order
;
1892 gfp_t gfp_mask
= GFP_ATOMIC
| __GFP_COMP
| __GFP_NOWARN
;
1893 struct sg_device
*sdp
= sfp
->parentdp
;
1898 ++blk_size
; /* don't know why */
1899 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1900 blk_size
= ALIGN(blk_size
, SG_SECTOR_SZ
);
1901 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
1902 "sg_build_indirect: buff_size=%d, blk_size=%d\n",
1903 buff_size
, blk_size
));
1905 /* N.B. ret_sz carried into this block ... */
1906 mx_sc_elems
= sg_build_sgat(schp
, sfp
, sg_tablesize
);
1907 if (mx_sc_elems
< 0)
1908 return mx_sc_elems
; /* most likely -ENOMEM */
1910 num
= scatter_elem_sz
;
1911 if (unlikely(num
!= scatter_elem_sz_prev
)) {
1912 if (num
< PAGE_SIZE
) {
1913 scatter_elem_sz
= PAGE_SIZE
;
1914 scatter_elem_sz_prev
= PAGE_SIZE
;
1916 scatter_elem_sz_prev
= num
;
1919 if (sdp
->device
->host
->unchecked_isa_dma
)
1920 gfp_mask
|= GFP_DMA
;
1922 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
1923 gfp_mask
|= __GFP_ZERO
;
1925 order
= get_order(num
);
1927 ret_sz
= 1 << (PAGE_SHIFT
+ order
);
1929 for (k
= 0, rem_sz
= blk_size
; rem_sz
> 0 && k
< mx_sc_elems
;
1930 k
++, rem_sz
-= ret_sz
) {
1932 num
= (rem_sz
> scatter_elem_sz_prev
) ?
1933 scatter_elem_sz_prev
: rem_sz
;
1935 schp
->pages
[k
] = alloc_pages(gfp_mask
| __GFP_ZERO
, order
);
1936 if (!schp
->pages
[k
])
1939 if (num
== scatter_elem_sz_prev
) {
1940 if (unlikely(ret_sz
> scatter_elem_sz_prev
)) {
1941 scatter_elem_sz
= ret_sz
;
1942 scatter_elem_sz_prev
= ret_sz
;
1946 SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO
, sfp
->parentdp
,
1947 "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
1949 } /* end of for loop */
1951 schp
->page_order
= order
;
1953 SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO
, sfp
->parentdp
,
1954 "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
1957 schp
->bufflen
= blk_size
;
1958 if (rem_sz
> 0) /* must have failed */
1962 for (i
= 0; i
< k
; i
++)
1963 __free_pages(schp
->pages
[i
], order
);
1972 sg_remove_scat(Sg_fd
* sfp
, Sg_scatter_hold
* schp
)
1974 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
1975 "sg_remove_scat: k_use_sg=%d\n", schp
->k_use_sg
));
1976 if (schp
->pages
&& schp
->sglist_len
> 0) {
1977 if (!schp
->dio_in_use
) {
1980 for (k
= 0; k
< schp
->k_use_sg
&& schp
->pages
[k
]; k
++) {
1982 sg_printk(KERN_INFO
, sfp
->parentdp
,
1983 "sg_remove_scat: k=%d, pg=0x%p\n",
1984 k
, schp
->pages
[k
]));
1985 __free_pages(schp
->pages
[k
], schp
->page_order
);
1991 memset(schp
, 0, sizeof (*schp
));
1995 sg_read_oxfer(Sg_request
* srp
, char __user
*outp
, int num_read_xfer
)
1997 Sg_scatter_hold
*schp
= &srp
->data
;
2000 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, srp
->parentfp
->parentdp
,
2001 "sg_read_oxfer: num_read_xfer=%d\n",
2003 if ((!outp
) || (num_read_xfer
<= 0))
2006 num
= 1 << (PAGE_SHIFT
+ schp
->page_order
);
2007 for (k
= 0; k
< schp
->k_use_sg
&& schp
->pages
[k
]; k
++) {
2008 if (num
> num_read_xfer
) {
2009 if (__copy_to_user(outp
, page_address(schp
->pages
[k
]),
2014 if (__copy_to_user(outp
, page_address(schp
->pages
[k
]),
2017 num_read_xfer
-= num
;
2018 if (num_read_xfer
<= 0)
2028 sg_build_reserve(Sg_fd
* sfp
, int req_size
)
2030 Sg_scatter_hold
*schp
= &sfp
->reserve
;
2032 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
2033 "sg_build_reserve: req_size=%d\n", req_size
));
2035 if (req_size
< PAGE_SIZE
)
2036 req_size
= PAGE_SIZE
;
2037 if (0 == sg_build_indirect(schp
, sfp
, req_size
))
2040 sg_remove_scat(sfp
, schp
);
2041 req_size
>>= 1; /* divide by 2 */
2042 } while (req_size
> (PAGE_SIZE
/ 2));
2046 sg_link_reserve(Sg_fd
* sfp
, Sg_request
* srp
, int size
)
2048 Sg_scatter_hold
*req_schp
= &srp
->data
;
2049 Sg_scatter_hold
*rsv_schp
= &sfp
->reserve
;
2053 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, sfp
->parentdp
,
2054 "sg_link_reserve: size=%d\n", size
));
2057 num
= 1 << (PAGE_SHIFT
+ rsv_schp
->page_order
);
2058 for (k
= 0; k
< rsv_schp
->k_use_sg
; k
++) {
2060 req_schp
->k_use_sg
= k
+ 1;
2061 req_schp
->sglist_len
= rsv_schp
->sglist_len
;
2062 req_schp
->pages
= rsv_schp
->pages
;
2064 req_schp
->bufflen
= size
;
2065 req_schp
->page_order
= rsv_schp
->page_order
;
2071 if (k
>= rsv_schp
->k_use_sg
)
2072 SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO
, sfp
->parentdp
,
2073 "sg_link_reserve: BAD size\n"));
2077 sg_unlink_reserve(Sg_fd
* sfp
, Sg_request
* srp
)
2079 Sg_scatter_hold
*req_schp
= &srp
->data
;
2081 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO
, srp
->parentfp
->parentdp
,
2082 "sg_unlink_reserve: req->k_use_sg=%d\n",
2083 (int) req_schp
->k_use_sg
));
2084 req_schp
->k_use_sg
= 0;
2085 req_schp
->bufflen
= 0;
2086 req_schp
->pages
= NULL
;
2087 req_schp
->page_order
= 0;
2088 req_schp
->sglist_len
= 0;
2090 /* Called without mutex lock to avoid deadlock */
2091 sfp
->res_in_use
= 0;
2095 sg_get_rq_mark(Sg_fd
* sfp
, int pack_id
)
2098 unsigned long iflags
;
2100 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2101 list_for_each_entry(resp
, &sfp
->rq_list
, entry
) {
2102 /* look for requests that are ready + not SG_IO owned */
2103 if ((1 == resp
->done
) && (!resp
->sg_io_owned
) &&
2104 ((-1 == pack_id
) || (resp
->header
.pack_id
== pack_id
))) {
2105 resp
->done
= 2; /* guard against other readers */
2106 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2110 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2114 /* always adds to end of list */
2116 sg_add_request(Sg_fd
* sfp
)
2119 unsigned long iflags
;
2120 Sg_request
*rp
= sfp
->req_arr
;
2122 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2123 if (!list_empty(&sfp
->rq_list
)) {
2127 for (k
= 0; k
< SG_MAX_QUEUE
; ++k
, ++rp
) {
2131 if (k
>= SG_MAX_QUEUE
)
2134 memset(rp
, 0, sizeof (Sg_request
));
2136 rp
->header
.duration
= jiffies_to_msecs(jiffies
);
2137 list_add_tail(&rp
->entry
, &sfp
->rq_list
);
2138 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2141 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2145 /* Return of 1 for found; 0 for not found */
2147 sg_remove_request(Sg_fd
* sfp
, Sg_request
* srp
)
2149 unsigned long iflags
;
2152 if (!sfp
|| !srp
|| list_empty(&sfp
->rq_list
))
2154 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2155 if (!list_empty(&srp
->entry
)) {
2156 list_del(&srp
->entry
);
2157 srp
->parentfp
= NULL
;
2160 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2165 sg_add_sfp(Sg_device
* sdp
)
2168 unsigned long iflags
;
2171 sfp
= kzalloc(sizeof(*sfp
), GFP_ATOMIC
| __GFP_NOWARN
);
2173 return ERR_PTR(-ENOMEM
);
2175 init_waitqueue_head(&sfp
->read_wait
);
2176 rwlock_init(&sfp
->rq_list_lock
);
2177 INIT_LIST_HEAD(&sfp
->rq_list
);
2178 kref_init(&sfp
->f_ref
);
2179 mutex_init(&sfp
->f_mutex
);
2180 sfp
->timeout
= SG_DEFAULT_TIMEOUT
;
2181 sfp
->timeout_user
= SG_DEFAULT_TIMEOUT_USER
;
2182 sfp
->force_packid
= SG_DEF_FORCE_PACK_ID
;
2183 sfp
->cmd_q
= SG_DEF_COMMAND_Q
;
2184 sfp
->keep_orphan
= SG_DEF_KEEP_ORPHAN
;
2185 sfp
->parentdp
= sdp
;
2186 write_lock_irqsave(&sdp
->sfd_lock
, iflags
);
2187 if (atomic_read(&sdp
->detaching
)) {
2188 write_unlock_irqrestore(&sdp
->sfd_lock
, iflags
);
2189 return ERR_PTR(-ENODEV
);
2191 list_add_tail(&sfp
->sfd_siblings
, &sdp
->sfds
);
2192 write_unlock_irqrestore(&sdp
->sfd_lock
, iflags
);
2193 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
2194 "sg_add_sfp: sfp=0x%p\n", sfp
));
2195 if (unlikely(sg_big_buff
!= def_reserved_size
))
2196 sg_big_buff
= def_reserved_size
;
2198 bufflen
= min_t(int, sg_big_buff
,
2199 max_sectors_bytes(sdp
->device
->request_queue
));
2200 sg_build_reserve(sfp
, bufflen
);
2201 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO
, sdp
,
2202 "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2203 sfp
->reserve
.bufflen
,
2204 sfp
->reserve
.k_use_sg
));
2206 kref_get(&sdp
->d_ref
);
2207 __module_get(THIS_MODULE
);
static void
sg_remove_sfp_usercontext(struct work_struct *work)
{
	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
	struct sg_device *sdp = sfp->parentdp;
	Sg_request *srp;
	unsigned long iflags;

	/* Cleanup any responses which were never read(). */
	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	while (!list_empty(&sfp->rq_list)) {
		srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
		sg_finish_rem_req(srp);
		list_del(&srp->entry);
		srp->parentfp = NULL;
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
				"sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
				(int) sfp->reserve.bufflen,
				(int) sfp->reserve.k_use_sg));
		sg_remove_scat(sfp, &sfp->reserve);
	}

	SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
			"sg_remove_sfp: sfp=0x%p\n", sfp));
	kfree(sfp);

	scsi_device_put(sdp->device);
	kref_put(&sdp->d_ref, sg_device_destroy);
	module_put(THIS_MODULE);
}
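
/*
 * kref release callback for an Sg_fd: unhook it from the parent device's
 * fd list and defer the remaining cleanup to the work item above.
 */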
static void
sg_remove_sfp(struct kref *kref)
{
	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
	struct sg_device *sdp = sfp->parentdp;
	unsigned long iflags;

	write_lock_irqsave(&sdp->sfd_lock, iflags);
	list_del(&sfp->sfd_siblings);
	write_unlock_irqrestore(&sdp->sfd_lock, iflags);

	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
	schedule_work(&sfp->ew.work);
}
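
/*
 * sg_idr_max_id() is an idr_for_each() callback used by sg_last_dev() to
 * find the highest device index currently allocated in sg_index_idr.
 */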
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;

	return 0;
}

static int
sg_last_dev(void)
{
	int k = 0;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif
/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}
static Sg_device *sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
	if (!sdp)
		sdp = ERR_PTR(-ENXIO);
	else if (atomic_read(&sdp->detaching)) {
		/* If sdp->detaching, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
		 */
		sdp = ERR_PTR(-ENODEV);
	} else
		kref_get(&sdp->d_ref);
	read_unlock_irqrestore(&sg_index_lock, flags);

	return sdp;
}
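
/*
 * Everything below implements the /proc/scsi/sg directory: a set of
 * seq_file based leaves (allow_dio, debug, def_reserved_size, device_hdr,
 * devices, device_strs, version) declared here and wired up by
 * sg_proc_init().
 */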
#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static const struct file_operations adio_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_adio,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_dressz,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_version,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_devhdr,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_dev,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_devstrs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_debug,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
	const char * name;
	const struct file_operations * fops;
};

static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};
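
/*
 * Example use from user space (illustrative only, not part of the driver):
 *
 *   cat /proc/scsi/sg/version              # sg_version_num, SG_VERSION_STR and build date
 *   cat /proc/scsi/sg/devices              # one row per sg device; device_hdr names the columns
 *   echo 1 > /proc/scsi/sg/allow_dio       # needs CAP_SYS_ADMIN and CAP_SYS_RAWIO
 *   echo 131072 > /proc/scsi/sg/def_reserved_size   # values above 1 MB are rejected with ERANGE
 *
 * Only leaves whose fops supply a .write handler are created writable
 * (S_IRUGO | S_IWUSR); the others are read-only (S_IRUGO).
 */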
static int
sg_proc_init(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;

	for (k = 0; k < num_leaves; ++k) {
		const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}
	return 0;
}
static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int err;
	unsigned long num;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	err = kstrtoul_from_user(buffer, count, 0, &num);
	if (err)
		return err;
	sg_allow_dio = num ? 1 : 0;
	return count;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int err;
	unsigned long k = ULONG_MAX;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	err = kstrtoul_from_user(buffer, count, 0, &k);
	if (err)
		return err;
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}

static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
	loff_t	index;
	size_t	max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (! it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if ((NULL == sdp) || (NULL == sdp->device) ||
	    (atomic_read(&sdp->detaching)))
		seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	else {
		scsidp = sdp->device;
		seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
			      scsidp->host->host_no, scsidp->channel,
			      scsidp->id, scsidp->lun, (int) scsidp->type,
			      1,
			      (int) scsidp->queue_depth,
			      (int) atomic_read(&scsidp->device_busy),
			      (int) scsi_device_online(scsidp));
	}
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	scsidp = sdp ? sdp->device : NULL;
	if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_puts(s, "<no active device>\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
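
/*
 * Helper for /proc/scsi/sg/debug: for each open fd on sdp it prints the
 * fd's timeout, reserve buffer size and queuing flags, followed by one line
 * per outstanding request (pack_id, buffer length, duration or elapsed
 * time, and SCSI opcode).
 */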
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char * cp;
	unsigned int ms;

	k = 0;
	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
		k++;
		read_lock(&fp->rq_list_lock); /* irqs already disabled */
		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) sdp->device->host->unchecked_isa_dma);
		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan);
		list_for_each_entry(srp, &fp->rq_list, entry) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = "     mmap>> ";
				else
					cp = "     rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = "     dio>> ";
				else
					cp = "     ";
			}
			seq_puts(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_puts(s, srp->done ?
				 ((1 == srp->done) ?  "rcv:" : "fin:")
				 : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					(new_interface ? hp->timeout :
						jiffies_to_msecs(fp->timeout)),
					(ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (list_empty(&fp->rq_list))
			seq_puts(s, "     No requests active\n");
		read_unlock(&fp->rq_list_lock);
	}
}
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}

static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	unsigned long iflags;

	if (it && (0 == it->index))
		seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
			   (int)it->max, sg_big_buff);

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (NULL == sdp)
		goto skip;
	read_lock(&sdp->sfd_lock);
	if (!list_empty(&sdp->sfds)) {
		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
		if (atomic_read(&sdp->detaching))
			seq_puts(s, "detaching pending close ");
		else if (sdp->device) {
			struct scsi_device *scsidp = sdp->device;

			seq_printf(s, "%d:%d:%d:%llu em=%d",
				   scsidp->host->host_no,
				   scsidp->channel, scsidp->id,
				   scsidp->lun,
				   scsidp->host->hostt->emulated);
		}
		seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
			   sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
		sg_proc_debug_helper(s, sdp);
	}
	read_unlock(&sdp->sfd_lock);
skip:
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);