1 /*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * Changes by Acxiom Corporation to add protocol version to kernel
5 * communication, Copyright Acxiom Corporation, 2005.
6 *
7 * See COPYING in top-level directory.
8 */
9
10 #include "protocol.h"
11 #include "orangefs-kernel.h"
12 #include "orangefs-dev-proto.h"
13 #include "orangefs-bufmap.h"
14
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
17
18 /* this file implements the /dev/pvfs2-req device node */
19
20 static int open_access_count;
21
22 #define DUMP_DEVICE_ERROR() \
23 do { \
24 gossip_err("*****************************************************\n");\
25 gossip_err("ORANGEFS Device Error: You cannot open the device file "); \
26 gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
27 "are no ", ORANGEFS_REQDEVICE_NAME); \
28 gossip_err("instances of a program using this device\ncurrently " \
29 "running. (You must verify this!)\n"); \
30 gossip_err("For example, you can use the lsof program as follows:\n");\
31 gossip_err("'lsof | grep %s' (run this as root)\n", \
32 ORANGEFS_REQDEVICE_NAME); \
33 gossip_err(" open_access_count = %d\n", open_access_count); \
34 gossip_err("*****************************************************\n");\
35 } while (0)
36
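/*
 * Map an op tag to a bucket in the in-progress hash table;
 * do_div() returns the remainder (tag % table_size).
 */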
37 static int hash_func(__u64 tag, int table_size)
38 {
39 return do_div(tag, (unsigned int)table_size);
40 }
41
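/* Hash the op by its tag and append it to the in-progress table. */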
42 static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
43 {
44 int index = hash_func(op->tag, hash_table_size);
45
46 spin_lock(&htable_ops_in_progress_lock);
47 list_add_tail(&op->list, &htable_ops_in_progress[index]);
48 spin_unlock(&htable_ops_in_progress_lock);
49 }
50
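/*
 * Find the in-progress op with the given tag, unlink it from its
 * hash bucket, and return it; returns NULL if no such op exists.
 */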
51 static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
52 {
53 struct orangefs_kernel_op_s *op, *next;
54 int index;
55
56 index = hash_func(tag, hash_table_size);
57
58 spin_lock(&htable_ops_in_progress_lock);
59 list_for_each_entry_safe(op,
60 next,
61 &htable_ops_in_progress[index],
62 list) {
63 if (op->tag == tag) {
64 list_del(&op->list);
65 spin_unlock(&htable_ops_in_progress_lock);
66 return op;
67 }
68 }
69
70 spin_unlock(&htable_ops_in_progress_lock);
71 return NULL;
72 }
73
74 static int orangefs_devreq_open(struct inode *inode, struct file *file)
75 {
76 int ret = -EINVAL;
77
78 if (!(file->f_flags & O_NONBLOCK)) {
79 gossip_err("orangefs: device cannot be opened in blocking mode\n");
80 goto out;
81 }
82 ret = -EACCES;
83 gossip_debug(GOSSIP_DEV_DEBUG, "pvfs2-client-core: opening device\n");
84 mutex_lock(&devreq_mutex);
85
86 if (open_access_count == 0) {
87 ret = generic_file_open(inode, file);
88 if (ret == 0)
89 open_access_count++;
90 } else {
91 DUMP_DEVICE_ERROR();
92 }
93 mutex_unlock(&devreq_mutex);
94
95 out:
96
97 gossip_debug(GOSSIP_DEV_DEBUG,
98 "pvfs2-client-core: open device complete (ret = %d)\n",
99 ret);
100 return ret;
101 }
102
103 static ssize_t orangefs_devreq_read(struct file *file,
104 char __user *buf,
105 size_t count, loff_t *offset)
106 {
107 struct orangefs_kernel_op_s *op, *temp;
108 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
109 static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
110 struct orangefs_kernel_op_s *cur_op = NULL;
111 unsigned long ret;
112
113 /* We do not support blocking IO. */
114 if (!(file->f_flags & O_NONBLOCK)) {
115 gossip_err("orangefs: blocking reads are not supported! (pvfs2-client-core bug)\n");
116 return -EINVAL;
117 }
118
119 /*
120 * The client will do an ioctl to find MAX_ALIGNED_DEV_REQ_UPSIZE, then
121 * always read with that size buffer.
122 */
123 if (count != MAX_ALIGNED_DEV_REQ_UPSIZE) {
124 gossip_err("orangefs: client-core tried to read wrong size\n");
125 return -EINVAL;
126 }
127
128 /* Get next op (if any) from top of list. */
129 spin_lock(&orangefs_request_list_lock);
130 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
131 __s32 fsid;
132 /* This lock is held past the end of the loop when we break. */
133 spin_lock(&op->lock);
134
135 fsid = fsid_of_op(op);
136 if (fsid != ORANGEFS_FS_ID_NULL) {
137 int ret;
138 /* Skip ops whose filesystem needs to be mounted. */
139 ret = fs_mount_pending(fsid);
140 if (ret == 1) {
141 gossip_debug(GOSSIP_DEV_DEBUG,
142 "orangefs: skipping op tag %llu %s\n",
143 llu(op->tag), get_opname_string(op));
144 spin_unlock(&op->lock);
145 continue;
146 /* Skip ops whose filesystem we don't know about unless
147 * it is being mounted. */
148 /* XXX: is there a better way to detect this? */
149 } else if (ret == -1 &&
150 !(op->upcall.type == ORANGEFS_VFS_OP_FS_MOUNT ||
151 op->upcall.type == ORANGEFS_VFS_OP_GETATTR)) {
152 gossip_debug(GOSSIP_DEV_DEBUG,
153 "orangefs: skipping op tag %llu %s\n",
154 llu(op->tag), get_opname_string(op));
155 gossip_err(
156 "orangefs: ERROR: fs_mount_pending %d\n",
157 fsid);
158 spin_unlock(&op->lock);
159 continue;
160 }
161 }
162 /*
163 * Either this op does not pertain to a filesystem, is mounting
164 * a filesystem, or pertains to a mounted filesystem. Let it
165 * through.
166 */
167 cur_op = op;
168 break;
169 }
170
171 /*
172 * At this point we either have a valid op and can continue or have not
173 * found an op and must ask the client to try again later.
174 */
175 if (!cur_op) {
176 spin_unlock(&orangefs_request_list_lock);
177 return -EAGAIN;
178 }
179
180 gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
181 llu(cur_op->tag), get_opname_string(cur_op));
182
183 /*
184 * Such an op should never be on the list in the first place; if one is,
185 * we remove it from the list and ask the client to retry.
186 */
187 if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
188 gossip_err("orangefs: ERROR: Current op already queued.\n");
189 list_del(&cur_op->list);
190 spin_unlock(&cur_op->lock);
191 spin_unlock(&orangefs_request_list_lock);
192 return -EAGAIN;
193 }
194
195 /*
196 * Set the operation to be in progress and move it between lists since
197 * it has been sent to the client.
198 */
199 set_op_state_inprogress(cur_op);
200
201 list_del(&cur_op->list);
202 spin_unlock(&orangefs_request_list_lock);
203 orangefs_devreq_add_op(cur_op);
204 spin_unlock(&cur_op->lock);
205
206 /* Push the upcall out. */
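/*
 * Layout of the buffer handed to client-core, as implied by the
 * copies below:
 *   [0]  __s32 proto_ver
 *   [4]  __s32 magic
 *   [8]  __u64 tag
 *   [16] struct orangefs_upcall_s (the upcall itself)
 */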
207 ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
208 if (ret != 0)
209 goto error;
210 ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
211 if (ret != 0)
212 goto error;
213 ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
214 if (ret != 0)
215 goto error;
216 ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
217 sizeof(struct orangefs_upcall_s));
218 if (ret != 0)
219 goto error;
220
221 /* The client only asks to read one size buffer. */
222 return MAX_ALIGNED_DEV_REQ_UPSIZE;
223 error:
224 /*
225 * We were unable to copy the op data to the client. Put the op back in
226 * the list. If the client has crashed, the op will be purged later when the
227 * device is released.
228 */
229 gossip_err("orangefs: Failed to copy data to user space\n");
230 spin_lock(&orangefs_request_list_lock);
231 spin_lock(&cur_op->lock);
232 set_op_state_waiting(cur_op);
233 orangefs_devreq_remove_op(cur_op->tag);
234 list_add(&cur_op->list, &orangefs_request_list);
235 spin_unlock(&cur_op->lock);
236 spin_unlock(&orangefs_request_list_lock);
237 return -EFAULT;
238 }
239
240 /* Function for writev() callers into the device */
241 static ssize_t orangefs_devreq_writev(struct file *file,
242 const struct iovec *iov,
243 size_t count,
244 loff_t *offset)
245 {
246 struct orangefs_kernel_op_s *op = NULL;
247 void *buffer = NULL;
248 void *ptr = NULL;
249 unsigned long i = 0;
250 static int max_downsize = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
251 int ret = 0, num_remaining = max_downsize;
252 int notrailer_count = 4; /* num elements in iovec without trailer */
253 int payload_size = 0;
254 __s32 magic = 0;
255 __s32 proto_ver = 0;
256 __u64 tag = 0;
257 ssize_t total_returned_size = 0;
258
259 /* Either there is a trailer or there isn't */
260 if (count != notrailer_count && count != (notrailer_count + 1)) {
261 gossip_err("Error: Number of iov vectors is (%zu) and notrailer count is %d\n",
262 count,
263 notrailer_count);
264 return -EPROTO;
265 }
266 buffer = dev_req_alloc();
267 if (!buffer)
268 return -ENOMEM;
269 ptr = buffer;
270
271 for (i = 0; i < notrailer_count; i++) {
272 if (iov[i].iov_len > num_remaining) {
273 gossip_err
274 ("writev error: Freeing buffer and returning\n");
275 dev_req_release(buffer);
276 return -EMSGSIZE;
277 }
278 ret = copy_from_user(ptr, iov[i].iov_base, iov[i].iov_len);
279 if (ret) {
280 gossip_err("Failed to copy data from user space\n");
281 dev_req_release(buffer);
282 return -EIO;
283 }
284 num_remaining -= iov[i].iov_len;
285 ptr += iov[i].iov_len;
286 payload_size += iov[i].iov_len;
287 }
288 total_returned_size = payload_size;
289
290 /* These elements are currently 8-byte aligned (8 bytes for version +
291 * magic, 8 bytes for tag). If you add another element, either
292 * make it 8 bytes big or use get_unaligned when assigning.
293 */
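/*
 * Header layout unpacked below:
 *   [0]  __s32 proto_ver
 *   [4]  __s32 magic
 *   [8]  __u64 tag
 * followed by the downcall payload (and, optionally, a trailer iovec).
 */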
294 ptr = buffer;
295 proto_ver = *((__s32 *) ptr);
296 ptr += sizeof(__s32);
297
298 magic = *((__s32 *) ptr);
299 ptr += sizeof(__s32);
300
301 tag = *((__u64 *) ptr);
302 ptr += sizeof(__u64);
303
304 if (magic != ORANGEFS_DEVREQ_MAGIC) {
305 gossip_err("Error: Device magic number does not match.\n");
306 dev_req_release(buffer);
307 return -EPROTO;
308 }
309
310 /*
311 * proto_ver = 20902 for 2.9.2
312 */
313
314 op = orangefs_devreq_remove_op(tag);
315 if (op) {
316 /* Increase ref count! */
317 get_op(op);
318 /* cut off magic and tag from payload size */
319 payload_size -= (2 * sizeof(__s32) + sizeof(__u64));
320 if (payload_size <= sizeof(struct orangefs_downcall_s))
321 /* copy the passed in downcall into the op */
322 memcpy(&op->downcall,
323 ptr,
324 sizeof(struct orangefs_downcall_s));
325 else
326 gossip_debug(GOSSIP_DEV_DEBUG,
327 "writev: Ignoring %d bytes\n",
328 payload_size);
329
330 /* Do not allocate needlessly if client-core forgets
331 * to reset trailer size on op errors.
332 */
333 if (op->downcall.status == 0 && op->downcall.trailer_size > 0) {
334 __u64 trailer_size = op->downcall.trailer_size;
335 size_t size;
336 gossip_debug(GOSSIP_DEV_DEBUG,
337 "writev: trailer size %ld\n",
338 (unsigned long)trailer_size);
339 if (count != (notrailer_count + 1)) {
340 gossip_err("Error: trailer size (%ld) is non-zero, no trailer elements though? (%zu)\n", (unsigned long)trailer_size, count);
341 dev_req_release(buffer);
342 put_op(op);
343 return -EPROTO;
344 }
345 size = iov[notrailer_count].iov_len;
346 if (size > trailer_size) {
347 gossip_err("writev error: trailer size (%ld) != iov_len (%zd)\n", (unsigned long)trailer_size, size);
348 dev_req_release(buffer);
349 put_op(op);
350 return -EMSGSIZE;
351 }
352 /* Allocate a buffer large enough to hold the
353 * trailer bytes.
354 */
355 op->downcall.trailer_buf = vmalloc(trailer_size);
356 if (op->downcall.trailer_buf != NULL) {
357 gossip_debug(GOSSIP_DEV_DEBUG, "vmalloc: %p\n",
358 op->downcall.trailer_buf);
359 ret = copy_from_user(op->downcall.trailer_buf,
360 iov[notrailer_count].
361 iov_base,
362 size);
363 if (ret) {
364 gossip_err("Failed to copy trailer data from user space\n");
365 dev_req_release(buffer);
366 gossip_debug(GOSSIP_DEV_DEBUG,
367 "vfree: %p\n",
368 op->downcall.trailer_buf);
369 vfree(op->downcall.trailer_buf);
370 op->downcall.trailer_buf = NULL;
371 put_op(op);
372 return -EIO;
373 }
374 memset(op->downcall.trailer_buf + size, 0,
375 trailer_size - size);
376 } else {
377 /* Change downcall status */
378 op->downcall.status = -ENOMEM;
379 gossip_err("writev: could not vmalloc for trailer!\n");
380 }
381 }
382
383 /* if this operation is an I/O operation and if it was
384 * initiated on behalf of a *synchronous* VFS I/O operation,
385 * only then do we need to wait
386 * for all data to be copied before we can return to avoid
387 * buffer corruption and races that can pull the buffers
388 * out from under us.
389 *
390 * Essentially we're synchronizing with other parts of the
391 * vfs implicitly by not allowing the user space
392 * application reading/writing this device to return until
393 * the buffers are done being used.
394 */
395 if (op->upcall.type == ORANGEFS_VFS_OP_FILE_IO &&
396 op->upcall.req.io.async_vfs_io == ORANGEFS_VFS_SYNC_IO) {
397 int timed_out = 0;
398 DECLARE_WAITQUEUE(wait_entry, current);
399
400 /* tell the vfs op waiting on a waitqueue
401 * that this op is done
402 */
403 spin_lock(&op->lock);
404 set_op_state_serviced(op);
405 spin_unlock(&op->lock);
406
407 add_wait_queue_exclusive(&op->io_completion_waitq,
408 &wait_entry);
409 wake_up_interruptible(&op->waitq);
410
411 while (1) {
412 set_current_state(TASK_INTERRUPTIBLE);
413
414 spin_lock(&op->lock);
415 if (op->io_completed) {
416 spin_unlock(&op->lock);
417 break;
418 }
419 spin_unlock(&op->lock);
420
421 if (!signal_pending(current)) {
422 int timeout =
423 MSECS_TO_JIFFIES(1000 *
424 op_timeout_secs);
425 if (!schedule_timeout(timeout)) {
426 gossip_debug(GOSSIP_DEV_DEBUG, "*** I/O wait time is up\n");
427 timed_out = 1;
428 break;
429 }
430 continue;
431 }
432
433 gossip_debug(GOSSIP_DEV_DEBUG, "*** signal on I/O wait -- aborting\n");
434 break;
435 }
436
437 set_current_state(TASK_RUNNING);
438 remove_wait_queue(&op->io_completion_waitq,
439 &wait_entry);
440
441 /* NOTE: for I/O operations we handle releasing the op
442 * object except in the case of timeout. the reason we
443 * can't free the op in timeout cases is that the op
444 * service logic in the vfs retries operations using
445 * the same op ptr, thus it can't be freed.
446 */
447 if (!timed_out)
448 op_release(op);
449 } else {
450
451 /*
452 * tell the vfs op waiting on a waitqueue that
453 * this op is done
454 */
455 spin_lock(&op->lock);
456 set_op_state_serviced(op);
457 spin_unlock(&op->lock);
458 /*
459 * for every other operation (i.e. non-I/O), we need to
460 * wake up the callers for downcall completion
461 * notification
462 */
463 wake_up_interruptible(&op->waitq);
464 }
465 } else {
466 /* ignore downcalls that we're not interested in */
467 gossip_debug(GOSSIP_DEV_DEBUG,
468 "WARNING: No one's waiting for tag %llu\n",
469 llu(tag));
470 }
471 dev_req_release(buffer);
472
473 return total_returned_size;
474 }
475
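/* Bridge the iov_iter-based write path onto the legacy iovec handler above. */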
476 static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
477 struct iov_iter *iter)
478 {
479 return orangefs_devreq_writev(iocb->ki_filp,
480 iter->iov,
481 iter->nr_segs,
482 &iocb->ki_pos);
483 }
484
485 /* Mark all mounted file systems as pending a remount; returns 1 if none are mounted */
486 static int mark_all_pending_mounts(void)
487 {
488 int unmounted = 1;
489 struct orangefs_sb_info_s *orangefs_sb = NULL;
490
491 spin_lock(&orangefs_superblocks_lock);
492 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
493 /* All of these file systems require a remount */
494 orangefs_sb->mount_pending = 1;
495 unmounted = 0;
496 }
497 spin_unlock(&orangefs_superblocks_lock);
498 return unmounted;
499 }
500
501 /*
502 * Determine if a given file system needs to be remounted or not
503 * Returns -1 on error
504 * 0 if already mounted
505 * 1 if needs remount
506 */
507 int fs_mount_pending(__s32 fsid)
508 {
509 int mount_pending = -1;
510 struct orangefs_sb_info_s *orangefs_sb = NULL;
511
512 spin_lock(&orangefs_superblocks_lock);
513 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
514 if (orangefs_sb->fs_id == fsid) {
515 mount_pending = orangefs_sb->mount_pending;
516 break;
517 }
518 }
519 spin_unlock(&orangefs_superblocks_lock);
520 return mount_pending;
521 }
522
523 /*
524 * NOTE: gets called when the last reference to this device is dropped.
525 * Using the open_access_count variable, we enforce a reference count
526 * on this file so that it can be opened by only one process at a time.
527 * The devreq_mutex is used to make sure all I/O has completed
528 * before we call orangefs_bufmap_finalize, and to handle similar
529 * tricky situations.
530 */
531 static int orangefs_devreq_release(struct inode *inode, struct file *file)
532 {
533 int unmounted = 0;
534
535 gossip_debug(GOSSIP_DEV_DEBUG,
536 "%s:pvfs2-client-core: exiting, closing device\n",
537 __func__);
538
539 mutex_lock(&devreq_mutex);
540 orangefs_bufmap_finalize();
541
542 open_access_count--;
543
544 unmounted = mark_all_pending_mounts();
545 gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
546 (unmounted ? "UNMOUNTED" : "MOUNTED"));
547 mutex_unlock(&devreq_mutex);
548
549 /*
550 * Walk through the list of ops in the request list, mark them
551 * as purged and wake them up.
552 */
553 purge_waiting_ops();
554 /*
555 * Walk through the hash table of in progress operations; mark
556 * them as purged and wake them up
557 */
558 purge_inprogress_ops();
559 gossip_debug(GOSSIP_DEV_DEBUG,
560 "pvfs2-client-core: device close complete\n");
561 return 0;
562 }
563
564 int is_daemon_in_service(void)
565 {
566 int in_service;
567
568 /*
569 * Check whether client-core is alive, based on the access count
570 * we maintain on the device.
571 */
572 mutex_lock(&devreq_mutex);
573 in_service = open_access_count == 1 ? 0 : -EIO;
574 mutex_unlock(&devreq_mutex);
575 return in_service;
576 }
577
578 static inline long check_ioctl_command(unsigned int command)
579 {
580 /* Check for valid ioctl codes */
581 if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
582 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
583 command,
584 _IOC_TYPE(command),
585 ORANGEFS_DEV_MAGIC);
586 return -EINVAL;
587 }
588 /* and valid ioctl commands */
589 if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
590 gossip_err("Invalid ioctl command number [%d >= %d]\n",
591 _IOC_NR(command), ORANGEFS_DEV_MAXNR);
592 return -ENOIOCTLCMD;
593 }
594 return 0;
595 }
596
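/*
 * Carry out an ioctl that has already passed check_ioctl_command();
 * shared by the native and compat entry points.
 */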
597 static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
598 {
599 static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
600 static __s32 max_up_size = MAX_ALIGNED_DEV_REQ_UPSIZE;
601 static __s32 max_down_size = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
602 struct ORANGEFS_dev_map_desc user_desc;
603 int ret = 0;
604 struct dev_mask_info_s mask_info = { 0 };
605 struct dev_mask2_info_s mask2_info = { 0, 0 };
606 int upstream_kmod = 1;
607 struct list_head *tmp = NULL;
608 struct orangefs_sb_info_s *orangefs_sb = NULL;
609
610 /* mtmoore: add locking here */
611
612 switch (command) {
613 case ORANGEFS_DEV_GET_MAGIC:
614 return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
615 -EIO :
616 0);
617 case ORANGEFS_DEV_GET_MAX_UPSIZE:
618 return ((put_user(max_up_size,
619 (__s32 __user *) arg) == -EFAULT) ?
620 -EIO :
621 0);
622 case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
623 return ((put_user(max_down_size,
624 (__s32 __user *) arg) == -EFAULT) ?
625 -EIO :
626 0);
627 case ORANGEFS_DEV_MAP:
628 ret = copy_from_user(&user_desc,
629 (struct ORANGEFS_dev_map_desc __user *)
630 arg,
631 sizeof(struct ORANGEFS_dev_map_desc));
632 return ret ? -EIO : orangefs_bufmap_initialize(&user_desc);
633 case ORANGEFS_DEV_REMOUNT_ALL:
634 gossip_debug(GOSSIP_DEV_DEBUG,
635 "orangefs_devreq_ioctl: got ORANGEFS_DEV_REMOUNT_ALL\n");
636
637 /*
638 * remount all mounted orangefs volumes to regain the lost
639 * dynamic mount tables (if any) -- NOTE: this is done
640 * without keeping the superblock list locked due to the
641 * upcall/downcall waiting. also, the request semaphore is
642 * used to ensure that no operations will be serviced until
643 * all of the remounts are serviced (to keep ops issued between
644 * mounts from failing)
645 */
646 ret = mutex_lock_interruptible(&request_mutex);
647 if (ret < 0)
648 return ret;
649 gossip_debug(GOSSIP_DEV_DEBUG,
650 "orangefs_devreq_ioctl: priority remount in progress\n");
651 list_for_each(tmp, &orangefs_superblocks) {
652 orangefs_sb =
653 list_entry(tmp, struct orangefs_sb_info_s, list);
654 if (orangefs_sb && (orangefs_sb->sb)) {
655 gossip_debug(GOSSIP_DEV_DEBUG,
656 "Remounting SB %p\n",
657 orangefs_sb);
658
659 ret = orangefs_remount(orangefs_sb->sb);
660 if (ret) {
661 gossip_debug(GOSSIP_DEV_DEBUG,
662 "SB %p remount failed\n",
663 orangefs_sb);
664 break;
665 }
666 }
667 }
668 gossip_debug(GOSSIP_DEV_DEBUG,
669 "orangefs_devreq_ioctl: priority remount complete\n");
670 mutex_unlock(&request_mutex);
671 return ret;
672
673 case ORANGEFS_DEV_UPSTREAM:
674 ret = copy_to_user((void __user *)arg,
675 &upstream_kmod,
676 sizeof(upstream_kmod));
677
678 if (ret != 0)
679 return -EIO;
680 else
681 return ret;
682
683 case ORANGEFS_DEV_CLIENT_MASK:
684 ret = copy_from_user(&mask2_info,
685 (void __user *)arg,
686 sizeof(struct dev_mask2_info_s));
687
688 if (ret != 0)
689 return -EIO;
690
691 client_debug_mask.mask1 = mask2_info.mask1_value;
692 client_debug_mask.mask2 = mask2_info.mask2_value;
693
694 pr_info("%s: client debug mask has been received "
695 ":%llx: :%llx:\n",
696 __func__,
697 (unsigned long long)client_debug_mask.mask1,
698 (unsigned long long)client_debug_mask.mask2);
699
700 return ret;
701
702 case ORANGEFS_DEV_CLIENT_STRING:
703 ret = copy_from_user(&client_debug_array_string,
704 (void __user *)arg,
705 ORANGEFS_MAX_DEBUG_STRING_LEN);
706 if (ret != 0) {
707 pr_info("%s: "
708 "ORANGEFS_DEV_CLIENT_STRING: copy_from_user failed"
709 "\n",
710 __func__);
711 return -EIO;
712 }
713
714 pr_info("%s: client debug array string has been received."
715 "\n",
716 __func__);
717
718 if (!help_string_initialized) {
719
720 /* Free the "we don't know yet" default string... */
721 kfree(debug_help_string);
722
723 /* build a proper debug help string */
724 if (orangefs_prepare_debugfs_help_string(0)) {
725 gossip_err("%s: "
726 "prepare_debugfs_help_string failed"
727 "\n",
728 __func__);
729 return -EIO;
730 }
731
732 /* Replace the boilerplate boot-time debug-help file. */
733 debugfs_remove(help_file_dentry);
734
735 help_file_dentry =
736 debugfs_create_file(
737 ORANGEFS_KMOD_DEBUG_HELP_FILE,
738 0444,
739 debug_dir,
740 debug_help_string,
741 &debug_help_fops);
742
743 if (!help_file_dentry) {
744 gossip_err("%s: debugfs_create_file failed for"
745 " :%s:!\n",
746 __func__,
747 ORANGEFS_KMOD_DEBUG_HELP_FILE);
748 return -EIO;
749 }
750 }
751
752 debug_mask_to_string(&client_debug_mask, 1);
753
754 debugfs_remove(client_debug_dentry);
755
756 orangefs_client_debug_init();
757
758 help_string_initialized++;
759
760 return ret;
761
762 case ORANGEFS_DEV_DEBUG:
763 ret = copy_from_user(&mask_info,
764 (void __user *)arg,
765 sizeof(mask_info));
766
767 if (ret != 0)
768 return -EIO;
769
770 if (mask_info.mask_type == KERNEL_MASK) {
771 if ((mask_info.mask_value == 0)
772 && (kernel_mask_set_mod_init)) {
773 /*
774 * the kernel debug mask was set when the
775 * kernel module was loaded; don't override
776 * it if the client-core was started without
777 * a value for ORANGEFS_KMODMASK.
778 */
779 return 0;
780 }
781 debug_mask_to_string(&mask_info.mask_value,
782 mask_info.mask_type);
783 gossip_debug_mask = mask_info.mask_value;
784 pr_info("ORANGEFS: kernel debug mask has been modified to "
785 ":%s: :%llx:\n",
786 kernel_debug_string,
787 (unsigned long long)gossip_debug_mask);
788 } else if (mask_info.mask_type == CLIENT_MASK) {
789 debug_mask_to_string(&mask_info.mask_value,
790 mask_info.mask_type);
791 pr_info("ORANGEFS: client debug mask has been modified to "
792 ":%s: :%llx:\n",
793 client_debug_string,
794 llu(mask_info.mask_value));
795 } else {
796 gossip_lerr("Invalid mask type....\n");
797 return -EINVAL;
798 }
799
800 return ret;
801
802 default:
803 return -ENOIOCTLCMD;
804 }
805 return -ENOIOCTLCMD;
806 }
807
808 static long orangefs_devreq_ioctl(struct file *file,
809 unsigned int command, unsigned long arg)
810 {
811 long ret;
812
813 /* Check for properly constructed commands */
814 ret = check_ioctl_command(command);
815 if (ret < 0)
816 return (int)ret;
817
818 return (int)dispatch_ioctl_command(command, arg);
819 }
820
821 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
822
823 /* Compat structure for the ORANGEFS_DEV_MAP ioctl */
824 struct ORANGEFS_dev_map_desc32 {
825 compat_uptr_t ptr;
826 __s32 total_size;
827 __s32 size;
828 __s32 count;
829 };
830
831 static unsigned long translate_dev_map26(unsigned long args, long *error)
832 {
833 struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
834 /*
835 * Depending on the architecture, allocate some space on the
836 * user-call-stack based on our expected layout.
837 */
838 struct ORANGEFS_dev_map_desc __user *p =
839 compat_alloc_user_space(sizeof(*p));
840 compat_uptr_t addr;
841
842 *error = 0;
843 /* get the ptr from the 32 bit user-space */
844 if (get_user(addr, &p32->ptr))
845 goto err;
846 /* try to put that into a 64-bit layout */
847 if (put_user(compat_ptr(addr), &p->ptr))
848 goto err;
849 /* copy the remaining fields */
850 if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
851 goto err;
852 if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
853 goto err;
854 if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
855 goto err;
856 return (unsigned long)p;
857 err:
858 *error = -EFAULT;
859 return 0;
860 }
861
862 /*
863 * 32-bit user-space apps' ioctl handler, used when the kernel module
864 * is compiled as a 64-bit one
865 */
866 static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
867 unsigned long args)
868 {
869 long ret;
870 unsigned long arg = args;
871
872 /* Check for properly constructed commands */
873 ret = check_ioctl_command(cmd);
874 if (ret < 0)
875 return ret;
876 if (cmd == ORANGEFS_DEV_MAP) {
877 /*
878 * convert the arguments to what we expect internally
879 * in kernel space
880 */
881 arg = translate_dev_map26(args, &ret);
882 if (ret < 0) {
883 gossip_err("Could not translate dev map\n");
884 return ret;
885 }
886 }
887 /* no other ioctl requires translation */
888 return dispatch_ioctl_command(cmd, arg);
889 }
890
891 #endif /* CONFIG_COMPAT is in .config */
892
893 /*
894 * The following two ioctl32 functions had been refactored into the above
895 * CONFIG_COMPAT ifdef, but that was an oversimplification that was
896 * not noticed until we tried to compile on PowerPC...
897 */
898 #if (defined(CONFIG_COMPAT) && !defined(HAVE_REGISTER_IOCTL32_CONVERSION)) || !defined(CONFIG_COMPAT)
899 static int orangefs_ioctl32_init(void)
900 {
901 return 0;
902 }
903
904 static void orangefs_ioctl32_cleanup(void)
905 {
906 return;
907 }
908 #endif
909
910 /* the assigned character device major number */
911 static int orangefs_dev_major;
912
913 /*
914 * Initialize orangefs device specific state:
915 * Must be called at module load time only
916 */
917 int orangefs_dev_init(void)
918 {
919 int ret;
920
921 /* register the ioctl32 sub-system */
922 ret = orangefs_ioctl32_init();
923 if (ret < 0)
924 return ret;
925
926 /* register orangefs-req device */
927 orangefs_dev_major = register_chrdev(0,
928 ORANGEFS_REQDEVICE_NAME,
929 &orangefs_devreq_file_operations);
930 if (orangefs_dev_major < 0) {
931 gossip_debug(GOSSIP_DEV_DEBUG,
932 "Failed to register /dev/%s (error %d)\n",
933 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
934 orangefs_ioctl32_cleanup();
935 return orangefs_dev_major;
936 }
937
938 gossip_debug(GOSSIP_DEV_DEBUG,
939 "*** /dev/%s character device registered ***\n",
940 ORANGEFS_REQDEVICE_NAME);
941 gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
942 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
943 return 0;
944 }
945
946 void orangefs_dev_cleanup(void)
947 {
948 unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
949 gossip_debug(GOSSIP_DEV_DEBUG,
950 "*** /dev/%s character device unregistered ***\n",
951 ORANGEFS_REQDEVICE_NAME);
952 /* unregister the ioctl32 sub-system */
953 orangefs_ioctl32_cleanup();
954 }
955
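/*
 * Report POLLIN to client-core when at least one upcall is waiting
 * on the request list.
 */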
956 static unsigned int orangefs_devreq_poll(struct file *file,
957 struct poll_table_struct *poll_table)
958 {
959 int poll_revent_mask = 0;
960
961 if (open_access_count == 1) {
962 poll_wait(file, &orangefs_request_list_waitq, poll_table);
963
964 spin_lock(&orangefs_request_list_lock);
965 if (!list_empty(&orangefs_request_list))
966 poll_revent_mask |= POLLIN;
967 spin_unlock(&orangefs_request_list_lock);
968 }
969 return poll_revent_mask;
970 }
971
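/* File operations for the request device registered in orangefs_dev_init(). */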
972 const struct file_operations orangefs_devreq_file_operations = {
973 .owner = THIS_MODULE,
974 .read = orangefs_devreq_read,
975 .write_iter = orangefs_devreq_write_iter,
976 .open = orangefs_devreq_open,
977 .release = orangefs_devreq_release,
978 .unlocked_ioctl = orangefs_devreq_ioctl,
979
980 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
981 .compat_ioctl = orangefs_devreq_compat_ioctl,
982 #endif
983 .poll = orangefs_devreq_poll
984 };