/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}
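
/*
 * Note on __fuse_request_alloc(): requests with at most
 * FUSE_REQ_INLINE_PAGES pages keep their page and page_desc vectors
 * inside struct fuse_req itself, which avoids two extra allocations on
 * the common small-request path.
 */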

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
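
/*
 * fuse_block_alloc() throttles request allocation: nothing may be
 * allocated until the INIT reply has set fc->initialized, and
 * background requests additionally wait while fc->blocked is set
 * (i.e. while too many background requests are already in flight).
 */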

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req. However, that may
 * currently be in use. If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open. If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}
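
/*
 * Note: every caller of fuse_get_unique() in this file holds
 * fiq->waitq.lock, so the unlocked increment of fiq->reqctr above is
 * safe.
 */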

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}
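
/*
 * flush_bg_queue() runs under fc->lock (all callers hold it) and moves
 * queued background requests onto the input queue until
 * fc->active_background reaches fc->max_background.
 */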

/*
 * This function is called when a request is finished. Either a reply
 * has arrived, or it was aborted (and not yet sent), or some error
 * occurred during communication with userspace, or the device file
 * was closed. The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
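
/*
 * request_wait_answer() waits in up to three stages: first
 * interruptibly (any signal turns into a queued FUSE_INTERRUPT), then
 * killably (a fatal signal can still dequeue a request that userspace
 * has not read yet), and finally uninterruptibly once the request is
 * in userspace or was forced.
 */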

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
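
/*
 * fuse_simple_request() is the synchronous convenience wrapper: it
 * allocates a request, fills it from a fuse_args description, sends it
 * and waits for the reply, then returns either the error from the
 * reply header or, for argvar replies, the size of the variable-length
 * output argument.
 */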

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request. Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault. If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request. If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
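
/*
 * FR_LOCKED marks the window in which request pages are being copied
 * to or from userspace. fuse_abort_conn() leaves locked requests on
 * the io list and lets the copier finish them: see the FR_ABORTED
 * checks above and the to_end1 handling in fuse_abort_conn().
 */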

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, map it to kernel
 * address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
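
/*
 * fuse_check_page() rejects any page that is not completely "boring":
 * it must be unmapped, belong to no address_space, have a single
 * reference, and carry no page flags beyond the handful listed above.
 * Only such pages are safe to steal into the page cache.
 */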

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}
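
/*
 * Return value convention for fuse_try_move_page(): 0 means the pipe
 * page was stolen and now backs the request, a negative value is an
 * error, and 1 tells the caller (fuse_copy_page()) to fall back to a
 * plain copy.
 */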

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer. Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
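
/*
 * dequeue_forget() is called with fiq->waitq.lock held; it detaches up
 * to 'max' entries from the singly-linked forget list and returns them
 * as a chain.
 */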

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer. This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer. If no reply is needed (FORGET), or the request has been
 * aborted, or there was an error during the copying, then it's
 * finished by calling request_end(). Otherwise add it to the
 * processing list, and set the 'sent' flag.
 */
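/*
 * Dispatch order below: interrupts are always transferred first; then
 * forgets and normal requests are interleaved via fiq->forget_batch so
 * that neither class can starve the other.
 */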
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

	if (task_active_pid_ns(current) != fc->pid_ns)
		return -EIO;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
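
/*
 * copy_out_args() enforces the reply size: it must match the declared
 * output size exactly unless out->argvar is set, in which case the
 * last argument may be shorter than declared and is shrunk to fit.
 */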

/*
 * Write a single reply to a request. First the header is copied from
 * the write buffer. The request is then searched for on the processing
 * list by the unique ID found in the header. If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
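/*
 * For illustration only (a description of the wire format this
 * function parses, not additional kernel API): a reply is one write
 * consisting of a struct fuse_out_header { len, error, unique }
 * followed by the output arguments. The checks below require
 * oh.len == nbytes, oh.error in (-1000, 0], and oh.unique matching a
 * request on the processing list; oh.unique == 0 instead marks an
 * unsolicited notification, with oh.error carrying the notify code.
 */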
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (task_active_pid_ns(current) != fc->pid_ns)
		return -EIO;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}
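
/*
 * Poll semantics for the device: the fd is always writable (replies
 * can be written at any time) and becomes readable whenever an
 * interrupt, forget, or pending request is queued.
 */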

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Called with no locks held; request_end() takes fc->lock internally
 * for background requests.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem. The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests; they should be finished off immediately. Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests. It is possible that some request will finish before we can. This
 * is OK; in that case the request will be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;

		WARN_ON(!list_empty(&fpq->io));
		end_requests(fc, &fpq->processing);
		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}
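
/*
 * FUSE_DEV_IOC_CLONE lets a daemon bind additional /dev/fuse fds to an
 * existing connection: each clone shares the fuse_conn but gets its
 * own fuse_dev and processing queue (fud->pq), so requests can be
 * serviced through separate queues.
 */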

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}