// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */
12 #include "orangefs-kernel.h"
13 #include "orangefs-dev-proto.h"
14 #include "orangefs-bufmap.h"
15 #include "orangefs-debugfs.h"
17 #include <linux/debugfs.h>
18 #include <linux/slab.h>
/* this file implements the /dev/pvfs2-req device node */
22 uint32_t orangefs_userspace_version
;
24 static int open_access_count
;
26 static DEFINE_MUTEX(devreq_mutex
);
/*
 * Complain loudly when a second process tries to open the request device;
 * only one client-core instance may hold /dev/pvfs2-req open at a time.
 * Wrapped in do { } while (0) so it expands safely as a single statement.
 */
#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error: You cannot open the device file "); \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                       \
	gossip_err("instances of a program using this device\ncurrently "     \
		   "running. (You must verify this!)\n");                     \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                   \
		   ORANGEFS_REQDEVICE_NAME);                                  \
	gossip_err(" open_access_count = %d\n", open_access_count);           \
	gossip_err("*****************************************************\n");\
} while (0)
43 static int hash_func(__u64 tag
, int table_size
)
45 return do_div(tag
, (unsigned int)table_size
);
48 static void orangefs_devreq_add_op(struct orangefs_kernel_op_s
*op
)
50 int index
= hash_func(op
->tag
, hash_table_size
);
52 list_add_tail(&op
->list
, &orangefs_htable_ops_in_progress
[index
]);
/*
 * find the op with this tag and remove it from the in progress
 * hash table.
 */
59 static struct orangefs_kernel_op_s
*orangefs_devreq_remove_op(__u64 tag
)
61 struct orangefs_kernel_op_s
*op
, *next
;
64 index
= hash_func(tag
, hash_table_size
);
66 spin_lock(&orangefs_htable_ops_in_progress_lock
);
67 list_for_each_entry_safe(op
,
69 &orangefs_htable_ops_in_progress
[index
],
71 if (op
->tag
== tag
&& !op_state_purged(op
) &&
72 !op_state_given_up(op
)) {
73 list_del_init(&op
->list
);
74 spin_unlock(&orangefs_htable_ops_in_progress_lock
);
79 spin_unlock(&orangefs_htable_ops_in_progress_lock
);
/* Returns whether any FS are still pending remounted */
84 static int mark_all_pending_mounts(void)
87 struct orangefs_sb_info_s
*orangefs_sb
= NULL
;
89 spin_lock(&orangefs_superblocks_lock
);
90 list_for_each_entry(orangefs_sb
, &orangefs_superblocks
, list
) {
91 /* All of these file system require a remount */
92 orangefs_sb
->mount_pending
= 1;
95 spin_unlock(&orangefs_superblocks_lock
);
/*
 * Determine if a given file system needs to be remounted or not
 *  Returns -1 on error
 *           0 if already mounted
 *           1 if needs remount
 */
105 static int fs_mount_pending(__s32 fsid
)
107 int mount_pending
= -1;
108 struct orangefs_sb_info_s
*orangefs_sb
= NULL
;
110 spin_lock(&orangefs_superblocks_lock
);
111 list_for_each_entry(orangefs_sb
, &orangefs_superblocks
, list
) {
112 if (orangefs_sb
->fs_id
== fsid
) {
113 mount_pending
= orangefs_sb
->mount_pending
;
117 spin_unlock(&orangefs_superblocks_lock
);
118 return mount_pending
;
121 static int orangefs_devreq_open(struct inode
*inode
, struct file
*file
)
125 /* in order to ensure that the filesystem driver sees correct UIDs */
126 if (file
->f_cred
->user_ns
!= &init_user_ns
) {
127 gossip_err("%s: device cannot be opened outside init_user_ns\n",
132 if (!(file
->f_flags
& O_NONBLOCK
)) {
133 gossip_err("%s: device cannot be opened in blocking mode\n",
138 gossip_debug(GOSSIP_DEV_DEBUG
, "client-core: opening device\n");
139 mutex_lock(&devreq_mutex
);
141 if (open_access_count
== 0) {
142 open_access_count
= 1;
147 mutex_unlock(&devreq_mutex
);
151 gossip_debug(GOSSIP_DEV_DEBUG
,
152 "pvfs2-client-core: open device complete (ret = %d)\n",
157 /* Function for read() callers into the device */
158 static ssize_t
orangefs_devreq_read(struct file
*file
,
160 size_t count
, loff_t
*offset
)
162 struct orangefs_kernel_op_s
*op
, *temp
;
163 __s32 proto_ver
= ORANGEFS_KERNEL_PROTO_VERSION
;
164 static __s32 magic
= ORANGEFS_DEVREQ_MAGIC
;
165 struct orangefs_kernel_op_s
*cur_op
= NULL
;
168 /* We do not support blocking IO. */
169 if (!(file
->f_flags
& O_NONBLOCK
)) {
170 gossip_err("%s: blocking read from client-core.\n",
176 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
177 * always read with that size buffer.
179 if (count
!= MAX_DEV_REQ_UPSIZE
) {
180 gossip_err("orangefs: client-core tried to read wrong size\n");
184 /* Check for an empty list before locking. */
185 if (list_empty(&orangefs_request_list
))
189 /* Get next op (if any) from top of list. */
190 spin_lock(&orangefs_request_list_lock
);
191 list_for_each_entry_safe(op
, temp
, &orangefs_request_list
, list
) {
193 /* This lock is held past the end of the loop when we break. */
194 spin_lock(&op
->lock
);
195 if (unlikely(op_state_purged(op
) || op_state_given_up(op
))) {
196 spin_unlock(&op
->lock
);
200 fsid
= fsid_of_op(op
);
201 if (fsid
!= ORANGEFS_FS_ID_NULL
) {
203 /* Skip ops whose filesystem needs to be mounted. */
204 ret
= fs_mount_pending(fsid
);
206 gossip_debug(GOSSIP_DEV_DEBUG
,
207 "%s: mount pending, skipping op tag "
211 get_opname_string(op
));
212 spin_unlock(&op
->lock
);
215 * Skip ops whose filesystem we don't know about unless
216 * it is being mounted or unmounted. It is possible for
217 * a filesystem we don't know about to be unmounted if
218 * it fails to mount in the kernel after userspace has
219 * been sent the mount request.
221 /* XXX: is there a better way to detect this? */
222 } else if (ret
== -1 &&
224 ORANGEFS_VFS_OP_FS_MOUNT
||
226 ORANGEFS_VFS_OP_GETATTR
||
228 ORANGEFS_VFS_OP_FS_UMOUNT
)) {
229 gossip_debug(GOSSIP_DEV_DEBUG
,
230 "orangefs: skipping op tag %llu %s\n",
231 llu(op
->tag
), get_opname_string(op
));
233 "orangefs: ERROR: fs_mount_pending %d\n",
235 spin_unlock(&op
->lock
);
240 * Either this op does not pertain to a filesystem, is mounting
241 * a filesystem, or pertains to a mounted filesystem. Let it
249 * At this point we either have a valid op and can continue or have not
250 * found an op and must ask the client to try again later.
253 spin_unlock(&orangefs_request_list_lock
);
257 gossip_debug(GOSSIP_DEV_DEBUG
, "%s: reading op tag %llu %s\n",
260 get_opname_string(cur_op
));
263 * Such an op should never be on the list in the first place. If so, we
266 if (op_state_in_progress(cur_op
) || op_state_serviced(cur_op
)) {
267 gossip_err("orangefs: ERROR: Current op already queued.\n");
268 list_del_init(&cur_op
->list
);
269 spin_unlock(&cur_op
->lock
);
270 spin_unlock(&orangefs_request_list_lock
);
274 list_del_init(&cur_op
->list
);
275 spin_unlock(&orangefs_request_list_lock
);
277 spin_unlock(&cur_op
->lock
);
279 /* Push the upcall out. */
280 ret
= copy_to_user(buf
, &proto_ver
, sizeof(__s32
));
283 ret
= copy_to_user(buf
+sizeof(__s32
), &magic
, sizeof(__s32
));
286 ret
= copy_to_user(buf
+2 * sizeof(__s32
), &cur_op
->tag
, sizeof(__u64
));
289 ret
= copy_to_user(buf
+2*sizeof(__s32
)+sizeof(__u64
), &cur_op
->upcall
,
290 sizeof(struct orangefs_upcall_s
));
294 spin_lock(&orangefs_htable_ops_in_progress_lock
);
295 spin_lock(&cur_op
->lock
);
296 if (unlikely(op_state_given_up(cur_op
))) {
297 spin_unlock(&cur_op
->lock
);
298 spin_unlock(&orangefs_htable_ops_in_progress_lock
);
299 complete(&cur_op
->waitq
);
304 * Set the operation to be in progress and move it between lists since
305 * it has been sent to the client.
307 set_op_state_inprogress(cur_op
);
308 gossip_debug(GOSSIP_DEV_DEBUG
,
309 "%s: 1 op:%s: op_state:%d: process:%s:\n",
311 get_opname_string(cur_op
),
314 orangefs_devreq_add_op(cur_op
);
315 spin_unlock(&cur_op
->lock
);
316 spin_unlock(&orangefs_htable_ops_in_progress_lock
);
318 /* The client only asks to read one size buffer. */
319 return MAX_DEV_REQ_UPSIZE
;
322 * We were unable to copy the op data to the client. Put the op back in
323 * list. If client has crashed, the op will be purged later when the
324 * device is released.
326 gossip_err("orangefs: Failed to copy data to user space\n");
327 spin_lock(&orangefs_request_list_lock
);
328 spin_lock(&cur_op
->lock
);
329 if (likely(!op_state_given_up(cur_op
))) {
330 set_op_state_waiting(cur_op
);
331 gossip_debug(GOSSIP_DEV_DEBUG
,
332 "%s: 2 op:%s: op_state:%d: process:%s:\n",
334 get_opname_string(cur_op
),
337 list_add(&cur_op
->list
, &orangefs_request_list
);
338 spin_unlock(&cur_op
->lock
);
340 spin_unlock(&cur_op
->lock
);
341 complete(&cur_op
->waitq
);
343 spin_unlock(&orangefs_request_list_lock
);
/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 */
357 static ssize_t
orangefs_devreq_write_iter(struct kiocb
*iocb
,
358 struct iov_iter
*iter
)
361 struct orangefs_kernel_op_s
*op
= NULL
;
367 int total
= ret
= iov_iter_count(iter
);
368 int downcall_size
= sizeof(struct orangefs_downcall_s
);
369 int head_size
= sizeof(head
);
371 gossip_debug(GOSSIP_DEV_DEBUG
, "%s: total:%d: ret:%zd:\n",
376 if (total
< MAX_DEV_REQ_DOWNSIZE
) {
377 gossip_err("%s: total:%d: must be at least:%u:\n",
380 (unsigned int) MAX_DEV_REQ_DOWNSIZE
);
384 if (!copy_from_iter_full(&head
, head_size
, iter
)) {
385 gossip_err("%s: failed to copy head.\n", __func__
);
389 if (head
.version
< ORANGEFS_MINIMUM_USERSPACE_VERSION
) {
390 gossip_err("%s: userspace claims version"
391 "%d, minimum version required: %d.\n",
394 ORANGEFS_MINIMUM_USERSPACE_VERSION
);
398 if (head
.magic
!= ORANGEFS_DEVREQ_MAGIC
) {
399 gossip_err("Error: Device magic number does not match.\n");
403 if (!orangefs_userspace_version
) {
404 orangefs_userspace_version
= head
.version
;
405 } else if (orangefs_userspace_version
!= head
.version
) {
406 gossip_err("Error: userspace version changes\n");
410 /* remove the op from the in progress hash table */
411 op
= orangefs_devreq_remove_op(head
.tag
);
413 gossip_debug(GOSSIP_DEV_DEBUG
,
414 "%s: No one's waiting for tag %llu\n",
415 __func__
, llu(head
.tag
));
419 if (!copy_from_iter_full(&op
->downcall
, downcall_size
, iter
)) {
420 gossip_err("%s: failed to copy downcall.\n", __func__
);
424 if (op
->downcall
.status
)
428 * We've successfully peeled off the head and the downcall.
429 * Something has gone awry if total doesn't equal the
430 * sum of head_size, downcall_size and trailer_size.
432 if ((head_size
+ downcall_size
+ op
->downcall
.trailer_size
) != total
) {
433 gossip_err("%s: funky write, head_size:%d"
434 ": downcall_size:%d: trailer_size:%lld"
435 ": total size:%d:\n",
439 op
->downcall
.trailer_size
,
444 /* Only READDIR operations should have trailers. */
445 if ((op
->downcall
.type
!= ORANGEFS_VFS_OP_READDIR
) &&
446 (op
->downcall
.trailer_size
!= 0)) {
447 gossip_err("%s: %x operation with trailer.",
453 /* READDIR operations should always have trailers. */
454 if ((op
->downcall
.type
== ORANGEFS_VFS_OP_READDIR
) &&
455 (op
->downcall
.trailer_size
== 0)) {
456 gossip_err("%s: %x operation with no trailer.",
462 if (op
->downcall
.type
!= ORANGEFS_VFS_OP_READDIR
)
465 op
->downcall
.trailer_buf
= vmalloc(op
->downcall
.trailer_size
);
466 if (!op
->downcall
.trailer_buf
)
469 memset(op
->downcall
.trailer_buf
, 0, op
->downcall
.trailer_size
);
470 if (!copy_from_iter_full(op
->downcall
.trailer_buf
,
471 op
->downcall
.trailer_size
, iter
)) {
472 gossip_err("%s: failed to copy trailer.\n", __func__
);
473 vfree(op
->downcall
.trailer_buf
);
479 * Return to vfs waitqueue, and back to service_operation
480 * through wait_for_matching_downcall.
482 spin_lock(&op
->lock
);
483 if (unlikely(op_is_cancel(op
))) {
484 spin_unlock(&op
->lock
);
486 } else if (unlikely(op_state_given_up(op
))) {
487 spin_unlock(&op
->lock
);
488 complete(&op
->waitq
);
490 set_op_state_serviced(op
);
491 gossip_debug(GOSSIP_DEV_DEBUG
,
492 "%s: op:%s: op_state:%d: process:%s:\n",
494 get_opname_string(op
),
497 spin_unlock(&op
->lock
);
502 op
->downcall
.status
= -(ORANGEFS_ERROR_BIT
| 9);
507 op
->downcall
.status
= -(ORANGEFS_ERROR_BIT
| 8);
/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * the devreq_mutex is used to make sure all i/o has completed
 * before we call orangefs_bufmap_finalize, and similar such tricky
 * situations
 */
520 static int orangefs_devreq_release(struct inode
*inode
, struct file
*file
)
524 gossip_debug(GOSSIP_DEV_DEBUG
,
525 "%s:pvfs2-client-core: exiting, closing device\n",
528 mutex_lock(&devreq_mutex
);
529 orangefs_bufmap_finalize();
531 open_access_count
= -1;
533 unmounted
= mark_all_pending_mounts();
534 gossip_debug(GOSSIP_DEV_DEBUG
, "ORANGEFS Device Close: Filesystem(s) %s\n",
535 (unmounted
? "UNMOUNTED" : "MOUNTED"));
538 purge_inprogress_ops();
540 orangefs_bufmap_run_down();
542 gossip_debug(GOSSIP_DEV_DEBUG
,
543 "pvfs2-client-core: device close complete\n");
544 open_access_count
= 0;
545 orangefs_userspace_version
= 0;
546 mutex_unlock(&devreq_mutex
);
550 int is_daemon_in_service(void)
555 * What this function does is checks if client-core is alive
556 * based on the access count we maintain on the device.
558 mutex_lock(&devreq_mutex
);
559 in_service
= open_access_count
== 1 ? 0 : -EIO
;
560 mutex_unlock(&devreq_mutex
);
564 bool __is_daemon_in_service(void)
566 return open_access_count
== 1;
569 static inline long check_ioctl_command(unsigned int command
)
571 /* Check for valid ioctl codes */
572 if (_IOC_TYPE(command
) != ORANGEFS_DEV_MAGIC
) {
573 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
579 /* and valid ioctl commands */
580 if (_IOC_NR(command
) >= ORANGEFS_DEV_MAXNR
|| _IOC_NR(command
) <= 0) {
581 gossip_err("Invalid ioctl command number [%d >= %d]\n",
582 _IOC_NR(command
), ORANGEFS_DEV_MAXNR
);
588 static long dispatch_ioctl_command(unsigned int command
, unsigned long arg
)
590 static __s32 magic
= ORANGEFS_DEVREQ_MAGIC
;
591 static __s32 max_up_size
= MAX_DEV_REQ_UPSIZE
;
592 static __s32 max_down_size
= MAX_DEV_REQ_DOWNSIZE
;
593 struct ORANGEFS_dev_map_desc user_desc
;
595 int upstream_kmod
= 1;
596 struct orangefs_sb_info_s
*orangefs_sb
;
598 /* mtmoore: add locking here */
601 case ORANGEFS_DEV_GET_MAGIC
:
602 return ((put_user(magic
, (__s32 __user
*) arg
) == -EFAULT
) ?
605 case ORANGEFS_DEV_GET_MAX_UPSIZE
:
606 return ((put_user(max_up_size
,
607 (__s32 __user
*) arg
) == -EFAULT
) ?
610 case ORANGEFS_DEV_GET_MAX_DOWNSIZE
:
611 return ((put_user(max_down_size
,
612 (__s32 __user
*) arg
) == -EFAULT
) ?
615 case ORANGEFS_DEV_MAP
:
616 ret
= copy_from_user(&user_desc
,
617 (struct ORANGEFS_dev_map_desc __user
*)
619 sizeof(struct ORANGEFS_dev_map_desc
));
620 /* WTF -EIO and not -EFAULT? */
621 return ret
? -EIO
: orangefs_bufmap_initialize(&user_desc
);
622 case ORANGEFS_DEV_REMOUNT_ALL
:
623 gossip_debug(GOSSIP_DEV_DEBUG
,
624 "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
628 * remount all mounted orangefs volumes to regain the lost
629 * dynamic mount tables (if any) -- NOTE: this is done
630 * without keeping the superblock list locked due to the
631 * upcall/downcall waiting. also, the request mutex is
632 * used to ensure that no operations will be serviced until
633 * all of the remounts are serviced (to avoid ops between
636 ret
= mutex_lock_interruptible(&orangefs_request_mutex
);
639 gossip_debug(GOSSIP_DEV_DEBUG
,
640 "%s: priority remount in progress\n",
642 spin_lock(&orangefs_superblocks_lock
);
643 list_for_each_entry(orangefs_sb
, &orangefs_superblocks
, list
) {
645 * We have to drop the spinlock, so entries can be
646 * removed. They can't be freed, though, so we just
647 * keep the forward pointers and zero the back ones -
648 * that way we can get to the rest of the list.
650 if (!orangefs_sb
->list
.prev
)
652 gossip_debug(GOSSIP_DEV_DEBUG
,
653 "%s: Remounting SB %p\n",
657 spin_unlock(&orangefs_superblocks_lock
);
658 ret
= orangefs_remount(orangefs_sb
);
659 spin_lock(&orangefs_superblocks_lock
);
661 gossip_debug(GOSSIP_DEV_DEBUG
,
662 "SB %p remount failed\n",
667 spin_unlock(&orangefs_superblocks_lock
);
668 gossip_debug(GOSSIP_DEV_DEBUG
,
669 "%s: priority remount complete\n",
671 mutex_unlock(&orangefs_request_mutex
);
674 case ORANGEFS_DEV_UPSTREAM
:
675 ret
= copy_to_user((void __user
*)arg
,
677 sizeof(upstream_kmod
));
684 case ORANGEFS_DEV_CLIENT_MASK
:
685 return orangefs_debugfs_new_client_mask((void __user
*)arg
);
686 case ORANGEFS_DEV_CLIENT_STRING
:
687 return orangefs_debugfs_new_client_string((void __user
*)arg
);
688 case ORANGEFS_DEV_DEBUG
:
689 return orangefs_debugfs_new_debug((void __user
*)arg
);
/* ioctl entry point: validate the command, then dispatch it. */
static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}
709 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
711 /* Compat structure for the ORANGEFS_DEV_MAP ioctl */
712 struct ORANGEFS_dev_map_desc32
{
719 static unsigned long translate_dev_map26(unsigned long args
, long *error
)
721 struct ORANGEFS_dev_map_desc32 __user
*p32
= (void __user
*)args
;
723 * Depending on the architecture, allocate some space on the
724 * user-call-stack based on our expected layout.
726 struct ORANGEFS_dev_map_desc __user
*p
=
727 compat_alloc_user_space(sizeof(*p
));
731 /* get the ptr from the 32 bit user-space */
732 if (get_user(addr
, &p32
->ptr
))
734 /* try to put that into a 64-bit layout */
735 if (put_user(compat_ptr(addr
), &p
->ptr
))
737 /* copy the remaining fields */
738 if (copy_in_user(&p
->total_size
, &p32
->total_size
, sizeof(__s32
)))
740 if (copy_in_user(&p
->size
, &p32
->size
, sizeof(__s32
)))
742 if (copy_in_user(&p
->count
, &p32
->count
, sizeof(__s32
)))
744 return (unsigned long)p
;
/*
 * 32 bit user-space apps' ioctl handlers when kernel modules
 * is compiled as a 64 bit one
 */
754 static long orangefs_devreq_compat_ioctl(struct file
*filp
, unsigned int cmd
,
758 unsigned long arg
= args
;
760 /* Check for properly constructed commands */
761 ret
= check_ioctl_command(cmd
);
764 if (cmd
== ORANGEFS_DEV_MAP
) {
766 * convert the arguments to what we expect internally
769 arg
= translate_dev_map26(args
, &ret
);
771 gossip_err("Could not translate dev map\n");
775 /* no other ioctl requires translation */
776 return dispatch_ioctl_command(cmd
, arg
);
779 #endif /* CONFIG_COMPAT is in .config */
/* the assigned character device major number */
static int orangefs_dev_major;
/*
 * Initialize orangefs device specific state:
 * Must be called at module load time only
 */
788 int orangefs_dev_init(void)
790 /* register orangefs-req device */
791 orangefs_dev_major
= register_chrdev(0,
792 ORANGEFS_REQDEVICE_NAME
,
793 &orangefs_devreq_file_operations
);
794 if (orangefs_dev_major
< 0) {
795 gossip_debug(GOSSIP_DEV_DEBUG
,
796 "Failed to register /dev/%s (error %d)\n",
797 ORANGEFS_REQDEVICE_NAME
, orangefs_dev_major
);
798 return orangefs_dev_major
;
801 gossip_debug(GOSSIP_DEV_DEBUG
,
802 "*** /dev/%s character device registered ***\n",
803 ORANGEFS_REQDEVICE_NAME
);
804 gossip_debug(GOSSIP_DEV_DEBUG
, "'mknod /dev/%s c %d 0'.\n",
805 ORANGEFS_REQDEVICE_NAME
, orangefs_dev_major
);
809 void orangefs_dev_cleanup(void)
811 unregister_chrdev(orangefs_dev_major
, ORANGEFS_REQDEVICE_NAME
);
812 gossip_debug(GOSSIP_DEV_DEBUG
,
813 "*** /dev/%s character device unregistered ***\n",
814 ORANGEFS_REQDEVICE_NAME
);
817 static unsigned int orangefs_devreq_poll(struct file
*file
,
818 struct poll_table_struct
*poll_table
)
820 int poll_revent_mask
= 0;
822 poll_wait(file
, &orangefs_request_list_waitq
, poll_table
);
824 if (!list_empty(&orangefs_request_list
))
825 poll_revent_mask
|= POLL_IN
;
826 return poll_revent_mask
;
829 const struct file_operations orangefs_devreq_file_operations
= {
830 .owner
= THIS_MODULE
,
831 .read
= orangefs_devreq_read
,
832 .write_iter
= orangefs_devreq_write_iter
,
833 .open
= orangefs_devreq_open
,
834 .release
= orangefs_devreq_release
,
835 .unlocked_ioctl
= orangefs_devreq_ioctl
,
837 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
838 .compat_ioctl
= orangefs_devreq_compat_ioctl
,
840 .poll
= orangefs_devreq_poll