/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

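/*
 * Allocate a request and its page vector.  Requests needing at most
 * FUSE_REQ_INLINE_PAGES pages use the arrays embedded in the request
 * itself; larger ones get separately kmalloc'ed arrays.
 */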
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

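/*
 * True if allocating a request should block: either the connection is
 * not yet initialized, or a background request would exceed the limit.
 */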
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);
	if (req->in.h.uid == (uid_t)-1 || req->in.h.gid == (gid_t)-1) {
		fuse_put_request(fc, req);
		return ERR_PTR(-EOVERFLOW);
	}

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return the request in fuse_file->reserved_req.  However, that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

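/* Allocate the next unique request ID; fiq->waitq.lock must be held */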
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

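/*
 * Move requests from the background queue to the input queue while
 * fewer than max_background requests are active.  Called under fc->lock.
 */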
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, and the reference to the
 * request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

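/*
 * Queue an INTERRUPT for a request that userspace has already read,
 * unless the request has finished in the meantime.
 */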
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

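/*
 * Shrink argument sizes for filesystems speaking an older protocol
 * minor version, so only the compat-sized structures are transferred.
 */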
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

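/*
 * State of an in-progress copy between request arguments and a
 * userspace buffer (iov_iter) or a pipe.
 */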
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, map it to kernel address
 * space, and lock the request.
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

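/*
 * Sanity check that a page stolen from a pipe is really free to be
 * reused in the page cache.
 */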
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

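/*
 * Try to steal the page behind the current pipe buffer and splice it
 * into the request in place of the old page, avoiding a copy.  Returns
 * 0 on success, 1 to fall back to copying, or a negative error.
 */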
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

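/* Reference a request page as a pipe buffer (zero-copy splice read) */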
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

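/*
 * Detach up to "max" entries from the forget list.  Called with
 * fiq->waitq.lock held.
 */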
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.  If
 * no reply is needed (FORGET), or the request has been aborted, or there
 * was an error during the copying, then it is finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

	if (task_active_pid_ns(current) != fc->pid_ns ||
	    current_user_ns() != fc->user_ns)
		return -EIO;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

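/*
 * Copy the reply arguments from the userspace buffer into the request.
 * If the last argument is variable sized, it may be shortened to what
 * the reply actually contains.
 */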
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (task_active_pid_ns(current) != fc->pid_ns ||
	    current_user_ns() != fc->user_ns)
		return -EIO;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * A zero oh.unique indicates an unsolicited notification message,
	 * and the error field contains the notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Called without fc->lock held; request_end() takes it as needed.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

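/*
 * Wake up all files waiting in poll, so their pollers notice that the
 * connection is gone.
 */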
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests; they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can; this
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;

		WARN_ON(!list_empty(&fpq->io));
		end_requests(fc, &fpq->processing);
		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

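/*
 * FUSE_DEV_IOC_CLONE attaches this device fd to the connection behind
 * an already set-up device fd, so that several threads can read and
 * write requests in parallel.  A daemon might use it roughly like this
 * (illustrative sketch only, error handling omitted; "session_fd" is a
 * hypothetical fd of the original mounted device):
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR);
 *	ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd);
 *
 * After this, clone_fd services the same connection as session_fd.
 */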
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}