/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
                              struct fuse_page_desc *page_descs,
                              unsigned npages)
{
        memset(req, 0, sizeof(*req));
        memset(pages, 0, sizeof(*pages) * npages);
        memset(page_descs, 0, sizeof(*page_descs) * npages);
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        refcount_set(&req->count, 1);
        req->pages = pages;
        req->page_descs = page_descs;
        req->max_pages = npages;
        __set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
        if (req) {
                struct page **pages;
                struct fuse_page_desc *page_descs;

                if (npages <= FUSE_REQ_INLINE_PAGES) {
                        pages = req->inline_pages;
                        page_descs = req->inline_page_descs;
                } else {
                        pages = kmalloc(sizeof(struct page *) * npages, flags);
                        page_descs = kmalloc(sizeof(struct fuse_page_desc) *
                                             npages, flags);
                }

                if (!pages || !page_descs) {
                        kfree(pages);
                        kfree(page_descs);
                        kmem_cache_free(fuse_req_cachep, req);
                        return NULL;
                }

                fuse_request_init(req, pages, page_descs, npages);
        }
        return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
        if (req->pages != req->inline_pages) {
                kfree(req->pages);
                kfree(req->page_descs);
        }
        kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
        refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        refcount_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
        req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
        req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
        req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
        /* Make sure stores before this are seen on another CPU */
        smp_wmb();
        fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
        return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                       bool for_background)
{
        struct fuse_req *req;
        int err;
        atomic_inc(&fc->num_waiting);

        if (fuse_block_alloc(fc, for_background)) {
                err = -EINTR;
                if (wait_event_killable_exclusive(fc->blocked_waitq,
                                !fuse_block_alloc(fc, for_background)))
                        goto out;
        }
        /* Matches smp_wmb() in fuse_set_initialized() */
        smp_rmb();

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        err = -ECONNREFUSED;
        if (fc->conn_error)
                goto out;

        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req) {
                if (for_background)
                        wake_up(&fc->blocked_waitq);
                goto out;
        }

        fuse_req_init_context(fc, req);
        __set_bit(FR_WAITING, &req->flags);
        if (for_background)
                __set_bit(FR_BACKGROUND, &req->flags);
        if (req->in.h.uid == (uid_t)-1 || req->in.h.gid == (gid_t)-1) {
                fuse_put_request(fc, req);
                return ERR_PTR(-EOVERFLOW);
        }

        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
        return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
                                             unsigned npages)
{
        return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return the request in fuse_file->reserved_req.  However, that may
 * currently be in use; if so, wait for it to become available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        req->stolen_file = get_file(file);
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}

/*
 * Get a request for a file operation; always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
                                             struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
        /* Matches smp_wmb() in fuse_set_initialized() */
        smp_rmb();
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(fc, req);
        __set_bit(FR_WAITING, &req->flags);
        __clear_bit(FR_BACKGROUND, &req->flags);
        return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (refcount_dec_and_test(&req->count)) {
                if (test_bit(FR_BACKGROUND, &req->flags)) {
                        /*
                         * We get here in the unlikely case that a background
                         * request was allocated but not sent
                         */
                        spin_lock(&fc->lock);
                        if (!fc->blocked)
                                wake_up(&fc->blocked_waitq);
                        spin_unlock(&fc->lock);
                }

                if (test_bit(FR_WAITING, &req->flags)) {
                        __clear_bit(FR_WAITING, &req->flags);
                        atomic_dec(&fc->num_waiting);
                }

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

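/*
 * Hand out the next unique request ID; callers hold fiq->waitq.lock,
 * which serializes access to reqctr.
 */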
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
        return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fiq->pending);
        wake_up_locked(&fiq->waitq);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
{
        struct fuse_iqueue *fiq = &fc->iq;

        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;

        spin_lock(&fiq->waitq.lock);
        if (fiq->connected) {
                fiq->forget_list_tail->next = forget;
                fiq->forget_list_tail = forget;
                wake_up_locked(&fiq->waitq);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
        spin_unlock(&fiq->waitq.lock);
}

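/*
 * Move background requests to the input queue until the
 * active_background count reaches max_background.  Called under
 * fc->lock.
 */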
static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;
                struct fuse_iqueue *fiq = &fc->iq;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                spin_lock(&fiq->waitq.lock);
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request(fiq, req);
                spin_unlock(&fiq->waitq.lock);
        }
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &fc->iq;

        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto out_put_req;

        spin_lock(&fiq->waitq.lock);
        list_del_init(&req->intr_entry);
        spin_unlock(&fiq->waitq.lock);
        WARN_ON(test_bit(FR_PENDING, &req->flags));
        WARN_ON(test_bit(FR_SENT, &req->flags));
        if (test_bit(FR_BACKGROUND, &req->flags)) {
                spin_lock(&fc->lock);
                clear_bit(FR_BACKGROUND, &req->flags);
                if (fc->num_background == fc->max_background)
                        fc->blocked = 0;

                /* Wake up next waiter, if any */
                if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                        wake_up(&fc->blocked_waitq);

                if (fc->num_background == fc->congestion_threshold && fc->sb) {
                        clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
                        clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
                spin_unlock(&fc->lock);
        }
        wake_up(&req->waitq);
        if (req->end)
                req->end(fc, req);
out_put_req:
        fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
        spin_lock(&fiq->waitq.lock);
        if (test_bit(FR_FINISHED, &req->flags)) {
                spin_unlock(&fiq->waitq.lock);
                return;
        }
        if (list_empty(&req->intr_entry)) {
                list_add_tail(&req->intr_entry, &fiq->interrupts);
                wake_up_locked(&fiq->waitq);
        }
        spin_unlock(&fiq->waitq.lock);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

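/*
 * Wait for the request to be answered, in up to three stages: an
 * interruptible wait (skipped if the server cannot handle
 * FUSE_INTERRUPT), a killable wait (skipped for forced requests), and
 * finally an uninterruptible wait once the request is in userspace.
 */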
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &fc->iq;
        int err;

        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                err = wait_event_interruptible(req->waitq,
                                        test_bit(FR_FINISHED, &req->flags));
                if (!err)
                        return;

                set_bit(FR_INTERRUPTED, &req->flags);
                /* matches barrier in fuse_dev_do_read() */
                smp_mb__after_atomic();
                if (test_bit(FR_SENT, &req->flags))
                        queue_interrupt(fiq, req);
        }

        if (!test_bit(FR_FORCE, &req->flags)) {
                /* Only fatal signals may interrupt this */
                err = wait_event_killable(req->waitq,
                                        test_bit(FR_FINISHED, &req->flags));
                if (!err)
                        return;

                spin_lock(&fiq->waitq.lock);
                /* Request is not yet in userspace, bail out */
                if (test_bit(FR_PENDING, &req->flags)) {
                        list_del(&req->list);
                        spin_unlock(&fiq->waitq.lock);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
                spin_unlock(&fiq->waitq.lock);
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &fc->iq;

        BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
        spin_lock(&fiq->waitq.lock);
        if (!fiq->connected) {
                spin_unlock(&fiq->waitq.lock);
                req->out.h.error = -ENOTCONN;
        } else {
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request(fiq, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);
                spin_unlock(&fiq->waitq.lock);

                request_wait_answer(fc, req);
                /* Pairs with smp_wmb() in request_end() */
                smp_rmb();
        }
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        __set_bit(FR_ISREPLY, &req->flags);
        if (!test_bit(FR_WAITING, &req->flags)) {
                __set_bit(FR_WAITING, &req->flags);
                atomic_inc(&fc->num_waiting);
        }
        __fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

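/*
 * Trim request and reply argument sizes to what servers speaking older
 * protocol minor versions expect.
 */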
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
        if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
                args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

        if (fc->minor < 9) {
                switch (args->in.h.opcode) {
                case FUSE_LOOKUP:
                case FUSE_CREATE:
                case FUSE_MKNOD:
                case FUSE_MKDIR:
                case FUSE_SYMLINK:
                case FUSE_LINK:
                        args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
                        break;
                case FUSE_GETATTR:
                case FUSE_SETATTR:
                        args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
                        break;
                }
        }
        if (fc->minor < 12) {
                switch (args->in.h.opcode) {
                case FUSE_CREATE:
                        args->in.args[0].size = sizeof(struct fuse_open_in);
                        break;
                case FUSE_MKNOD:
                        args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
                        break;
                }
        }
}

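/*
 * Allocate a request, fill it in from @args, send it synchronously and
 * return the reply's error code, or the size of a variable-size last
 * argument on success.
 */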
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
        struct fuse_req *req;
        ssize_t ret;

        req = fuse_get_req(fc, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* Needs to be done after fuse_get_req() so that fc->minor is valid */
        fuse_adjust_compat(fc, args);

        req->in.h.opcode = args->in.h.opcode;
        req->in.h.nodeid = args->in.h.nodeid;
        req->in.numargs = args->in.numargs;
        memcpy(req->in.args, args->in.args,
               args->in.numargs * sizeof(struct fuse_in_arg));
        req->out.argvar = args->out.argvar;
        req->out.numargs = args->out.numargs;
        memcpy(req->out.args, args->out.args,
               args->out.numargs * sizeof(struct fuse_arg));
        fuse_request_send(fc, req);
        ret = req->out.h.error;
        if (!ret && args->out.argvar) {
                BUG_ON(args->out.numargs != 1);
                ret = req->out.args[0].size;
        }
        fuse_put_request(fc, req);

        return ret;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
        if (!test_bit(FR_WAITING, &req->flags)) {
                __set_bit(FR_WAITING, &req->flags);
                atomic_inc(&fc->num_waiting);
        }
        __set_bit(FR_ISREPLY, &req->flags);
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold && fc->sb) {
                set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
                set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        BUG_ON(!req->end);
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_background_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                spin_unlock(&fc->lock);
                req->out.h.error = -ENOTCONN;
                req->end(fc, req);
                fuse_put_request(fc, req);
        }
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                          struct fuse_req *req, u64 unique)
{
        int err = -ENODEV;
        struct fuse_iqueue *fiq = &fc->iq;

        __clear_bit(FR_ISREPLY, &req->flags);
        req->in.h.unique = unique;
        spin_lock(&fiq->waitq.lock);
        if (fiq->connected) {
                queue_request(fiq, req);
                err = 0;
        }
        spin_unlock(&fiq->waitq.lock);

        return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_forget_in inarg;

        memset(&inarg, 0, sizeof(inarg));
        inarg.nlookup = 1;
        req = fuse_get_req_nofail_nopages(fc, file);
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        __clear_bit(FR_ISREPLY, &req->flags);
        __fuse_request_send(fc, req);
        /* ignore errors */
        fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&req->waitq.lock);
                if (test_bit(FR_ABORTED, &req->flags))
                        err = -ENOENT;
                else
                        set_bit(FR_LOCKED, &req->flags);
                spin_unlock(&req->waitq.lock);
        }
        return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&req->waitq.lock);
                if (test_bit(FR_ABORTED, &req->flags))
                        err = -ENOENT;
                else
                        clear_bit(FR_LOCKED, &req->flags);
                spin_unlock(&req->waitq.lock);
        }
        return err;
}

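/*
 * State for copying a request to or from userspace, either through an
 * iov_iter or through a set of pipe buffers (for splice I/O).
 */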
struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        struct iov_iter *iter;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
        struct page *pg;
        unsigned len;
        unsigned offset;
        unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct iov_iter *iter)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;

                if (cs->write)
                        buf->len = PAGE_SIZE - cs->len;
                cs->currbuf = NULL;
        } else if (cs->pg) {
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
        }
        cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, map it to kernel address
 * space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        struct page *page;
        int err;

        err = unlock_request(cs->req);
        if (err)
                return err;

        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;

                if (!cs->write) {
                        err = pipe_buf_confirm(cs->pipe, buf);
                        if (err)
                                return err;

                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
                        cs->pg = buf->page;
                        cs->offset = buf->offset;
                        cs->len = buf->len;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;

                        page = alloc_page(GFP_HIGHUSER);
                        if (!page)
                                return -ENOMEM;

                        buf->page = page;
                        buf->offset = 0;
                        buf->len = 0;

                        cs->currbuf = buf;
                        cs->pg = page;
                        cs->offset = 0;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
                }
        } else {
                size_t off;
                err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
                if (err < 0)
                        return err;
                BUG_ON(!err);
                cs->len = err;
                cs->offset = off;
                cs->pg = page;
                iov_iter_advance(cs->iter, err);
        }

        return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                void *pgaddr = kmap_atomic(cs->pg);
                void *buf = pgaddr + cs->offset;

                if (cs->write)
                        memcpy(buf, *val, ncpy);
                else
                        memcpy(*val, buf, ncpy);

                kunmap_atomic(pgaddr);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->offset += ncpy;
        return ncpy;
}

static int fuse_check_page(struct page *page)
{
        if (page_mapcount(page) ||
            page->mapping != NULL ||
            page_count(page) != 1 ||
            (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
             ~(1 << PG_locked |
               1 << PG_referenced |
               1 << PG_uptodate |
               1 << PG_lru |
               1 << PG_active |
               1 << PG_reclaim))) {
                printk(KERN_WARNING "fuse: trying to steal weird page\n");
                printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
                return 1;
        }
        return 0;
}

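/*
 * Try to steal the page backing a pipe buffer and splice it into the
 * page cache in place of *pagep, avoiding a copy.  Returns 0 on
 * success, a negative error, or 1 to tell the caller to fall back to
 * an ordinary copy.
 */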
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
        int err;
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;

        err = unlock_request(cs->req);
        if (err)
                return err;

        fuse_copy_finish(cs);

        err = pipe_buf_confirm(cs->pipe, buf);
        if (err)
                return err;

        BUG_ON(!cs->nr_segs);
        cs->currbuf = buf;
        cs->len = buf->len;
        cs->pipebufs++;
        cs->nr_segs--;

        if (cs->len != PAGE_SIZE)
                goto out_fallback;

        if (pipe_buf_steal(cs->pipe, buf) != 0)
                goto out_fallback;

        newpage = buf->page;

        if (!PageUptodate(newpage))
                SetPageUptodate(newpage);

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
         */
        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
        if (err) {
                unlock_page(newpage);
                return err;
        }

        get_page(newpage);

        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);

        err = 0;
        spin_lock(&cs->req->waitq.lock);
        if (test_bit(FR_ABORTED, &cs->req->flags))
                err = -ENOENT;
        else
                *pagep = newpage;
        spin_unlock(&cs->req->waitq.lock);

        if (err) {
                unlock_page(newpage);
                put_page(newpage);
                return err;
        }

        unlock_page(oldpage);
        put_page(oldpage);
        cs->len = 0;

        return 0;

out_fallback_unlock:
        unlock_page(newpage);
out_fallback:
        cs->pg = buf->page;
        cs->offset = buf->offset;

        err = lock_request(cs->req);
        if (err)
                return err;

        return 1;
}

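/*
 * Reference a request page directly from a pipe buffer, so a splice
 * read can hand it to userspace without copying.
 */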
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
{
        struct pipe_buffer *buf;
        int err;

        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;

        err = unlock_request(cs->req);
        if (err)
                return err;

        fuse_copy_finish(cs);

        buf = cs->pipebufs;
        get_page(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;

        cs->pipebufs++;
        cs->nr_segs++;
        cs->len = 0;

        return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)
{
        int err;
        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE)
                clear_highpage(page);

        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
                                err = fuse_try_move_page(cs, pagep);
                                if (err <= 0)
                                        return err;
                        } else {
                                err = fuse_copy_fill(cs);
                                if (err)
                                        return err;
                        }
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                int err;
                unsigned offset = req->page_descs[i].offset;
                unsigned count = min(nbytes, req->page_descs[i].length);

                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
                if (err)
                        return err;

                nbytes -= count;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
        return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
        return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
                forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
                               struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fiq);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fiq->waitq.lock);
        if (nbytes < reqsize)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
}

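/*
 * Detach up to @max entries from the forget list and return the head;
 * called with fiq->waitq.lock held.
 */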
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
                                               unsigned max,
                                               unsigned *countp)
{
        struct fuse_forget_link *head = fiq->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned count;

        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;

        fiq->forget_list_head.next = *newhead;
        *newhead = NULL;
        if (fiq->forget_list_head.next == NULL)
                fiq->forget_list_tail = &fiq->forget_list_head;

        if (countp != NULL)
                *countp = count;

        return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
__releases(fiq->waitq.lock)
{
        int err;
        struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
        struct fuse_forget_in arg = {
                .nlookup = forget->forget_one.nlookup,
        };
        struct fuse_in_header ih = {
                .opcode = FUSE_FORGET,
                .nodeid = forget->forget_one.nodeid,
                .unique = fuse_get_unique(fiq),
                .len = sizeof(ih) + sizeof(arg),
        };

        spin_unlock(&fiq->waitq.lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
                                  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
        int err;
        unsigned max_forgets;
        unsigned count;
        struct fuse_forget_link *head;
        struct fuse_batch_forget_in arg = { .count = 0 };
        struct fuse_in_header ih = {
                .opcode = FUSE_BATCH_FORGET,
                .unique = fuse_get_unique(fiq),
                .len = sizeof(ih) + sizeof(arg),
        };

        if (nbytes < ih.len) {
                spin_unlock(&fiq->waitq.lock);
                return -EINVAL;
        }

        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
        head = dequeue_forget(fiq, max_forgets, &count);
        spin_unlock(&fiq->waitq.lock);

        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));

        while (head) {
                struct fuse_forget_link *forget = head;

                if (!err) {
                        err = fuse_copy_one(cs, &forget->forget_one,
                                            sizeof(forget->forget_one));
                }
                head = forget->next;
                kfree(forget);
        }

        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
                            struct fuse_copy_state *cs,
                            size_t nbytes)
__releases(fiq->waitq.lock)
{
        if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fiq, cs, nbytes);
        else
                return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
{
        ssize_t err;
        struct fuse_conn *fc = fud->fc;
        struct fuse_iqueue *fiq = &fc->iq;
        struct fuse_pqueue *fpq = &fud->pq;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;

        if (current_user_ns() != fc->user_ns)
                return -EIO;

 restart:
        spin_lock(&fiq->waitq.lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
            !request_pending(fiq))
                goto err_unlock;

        err = wait_event_interruptible_exclusive_locked(fiq->waitq,
                                !fiq->connected || request_pending(fiq));
        if (err)
                goto err_unlock;

        err = -ENODEV;
        if (!fiq->connected)
                goto err_unlock;

        if (!list_empty(&fiq->interrupts)) {
                req = list_entry(fiq->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fiq, cs, nbytes, req);
        }

        if (forget_pending(fiq)) {
                if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
                        return fuse_read_forget(fc, fiq, cs, nbytes);

                if (fiq->forget_batch <= -8)
                        fiq->forget_batch = 16;
        }

        req = list_entry(fiq->pending.next, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);
        list_del_init(&req->list);
        spin_unlock(&fiq->waitq.lock);

        in = &req->in;
        reqsize = in->h.len;

        if (task_active_pid_ns(current) != fc->pid_ns) {
                rcu_read_lock();
                in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
                rcu_read_unlock();
        }

        /* If request is too large, reply with an error and restart the read */
        if (nbytes < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since its data may legitimately be too large */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_lock(&fpq->lock);
        list_add(&req->list, &fpq->io);
        spin_unlock(&fpq->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
        spin_lock(&fpq->lock);
        clear_bit(FR_LOCKED, &req->flags);
        if (!fpq->connected) {
                err = -ENODEV;
                goto out_end;
        }
        if (err) {
                req->out.h.error = -EIO;
                goto out_end;
        }
        if (!test_bit(FR_ISREPLY, &req->flags)) {
                err = reqsize;
                goto out_end;
        }
        list_move_tail(&req->list, &fpq->processing);
        spin_unlock(&fpq->lock);
        set_bit(FR_SENT, &req->flags);
        /* matches barrier in request_wait_answer() */
        smp_mb__after_atomic();
        if (test_bit(FR_INTERRUPTED, &req->flags))
                queue_interrupt(fiq, req);

        return reqsize;

out_end:
        if (!test_bit(FR_PRIVATE, &req->flags))
                list_del_init(&req->list);
        spin_unlock(&fpq->lock);
        request_end(fc, req);
        return err;

err_unlock:
        spin_unlock(&fiq->waitq.lock);
        return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
        /*
         * The fuse device file's private_data holds the fuse_conn(ection)
         * once it is mounted, and doubles as the flag that tells us
         * whether the file has been mounted already.
         */
        file->private_data = NULL;
        return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
        struct fuse_dev *fud = fuse_get_dev(file);

        if (!fud)
                return -EPERM;

        if (!iter_is_iovec(to))
                return -EINVAL;

        fuse_copy_init(&cs, 1, to);

        return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

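/*
 * Splice a request into a pipe without an extra copy: headers and
 * small arguments are copied into newly allocated pipe buffers, while
 * request pages are referenced directly where possible.
 */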
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
        int total, ret;
        int page_nr = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_dev *fud = fuse_get_dev(in);

        if (!fud)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        fuse_copy_init(&cs, 1, NULL);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fud, in, &cs, len);
        if (ret < 0)
                goto out;

        if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
                ret = -EIO;
                goto out;
        }

        for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
                /*
                 * Need to be careful about this.  Having buf->ops in module
                 * code can Oops if the buffer persists after module unload.
                 */
                bufs[page_nr].ops = &nosteal_pipe_buf_ops;
                bufs[page_nr].flags = 0;
                ret = add_to_pipe(pipe, &bufs[page_nr++]);
                if (unlikely(ret < 0))
                        break;
        }
        if (total)
                ret = total;
out:
        for (; page_nr < cs.nr_segs; page_nr++)
                put_page(bufs[page_nr].page);

        kfree(bufs);
        return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
                              struct fuse_copy_state *cs)
{
        struct fuse_notify_delete_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
                                               outarg.child, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

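/*
 * Handle FUSE_NOTIFY_STORE: write server-supplied data directly into
 * the inode's page cache, extending the file size if necessary.
 */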
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                             struct fuse_copy_state *cs)
{
        struct fuse_notify_store_out outarg;
        struct inode *inode;
        struct address_space *mapping;
        u64 nodeid;
        int err;
        pgoff_t index;
        unsigned int offset;
        unsigned int num;
        loff_t file_size;
        loff_t end;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto out_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto out_finish;

        err = -EINVAL;
        if (size - sizeof(outarg) != outarg.size)
                goto out_finish;

        nodeid = outarg.nodeid;

        down_read(&fc->killsb);

        err = -ENOENT;
        if (!fc->sb)
                goto out_up_killsb;

        inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
        if (!inode)
                goto out_up_killsb;

        mapping = inode->i_mapping;
        index = outarg.offset >> PAGE_SHIFT;
        offset = outarg.offset & ~PAGE_MASK;
        file_size = i_size_read(inode);
        end = outarg.offset + outarg.size;
        if (end > file_size) {
                file_size = end;
                fuse_write_update_size(inode, file_size);
        }

        num = outarg.size;
        while (num) {
                struct page *page;
                unsigned int this_num;

                err = -ENOMEM;
                page = find_or_create_page(mapping, index,
                                           mapping_gfp_mask(mapping));
                if (!page)
                        goto out_iput;

                this_num = min_t(unsigned, num, PAGE_SIZE - offset);
                err = fuse_copy_page(cs, &page, offset, this_num, 0);
                if (!err && offset == 0 &&
                    (this_num == PAGE_SIZE || file_size == end))
                        SetPageUptodate(page);
                unlock_page(page);
                put_page(page);

                if (err)
                        goto out_iput;

                num -= this_num;
                offset = 0;
                index++;
        }

        err = 0;

out_iput:
        iput(inode);
out_up_killsb:
        up_read(&fc->killsb);
out_finish:
        fuse_copy_finish(cs);
        return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
        release_pages(req->pages, req->num_pages);
}

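/*
 * Gather the requested range of cached pages for an inode and send
 * them back to the server in a FUSE_NOTIFY_REPLY request.
 */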
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                         struct fuse_notify_retrieve_out *outarg)
{
        int err;
        struct address_space *mapping = inode->i_mapping;
        struct fuse_req *req;
        pgoff_t index;
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
        size_t total_len = 0;
        int num_pages;

        offset = outarg->offset & ~PAGE_MASK;
        file_size = i_size_read(inode);

        num = outarg->size;
        if (outarg->offset > file_size)
                num = 0;
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;

        num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

        req = fuse_get_req(fc, num_pages);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->in.h.opcode = FUSE_NOTIFY_REPLY;
        req->in.h.nodeid = outarg->nodeid;
        req->in.numargs = 2;
        req->in.argpages = 1;
        req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;

        index = outarg->offset >> PAGE_SHIFT;

        while (num && req->num_pages < num_pages) {
                struct page *page;
                unsigned int this_num;

                page = find_get_page(mapping, index);
                if (!page)
                        break;

                this_num = min_t(unsigned, num, PAGE_SIZE - offset);
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;

                offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
        req->in.args[0].size = sizeof(req->misc.retrieve_in);
        req->in.args[0].value = &req->misc.retrieve_in;
        req->in.args[1].size = total_len;

        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
        if (err)
                fuse_retrieve_end(fc, req);

        return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
                                struct fuse_copy_state *cs)
{
        struct fuse_notify_retrieve_out outarg;
        struct inode *inode;
        int err;

        err = -EINVAL;
        if (size != sizeof(outarg))
                goto copy_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto copy_finish;

        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                u64 nodeid = outarg.nodeid;

                inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
                if (inode) {
                        err = fuse_retrieve(fc, inode, &outarg);
                        iput(inode);
                }
        }
        up_read(&fc->killsb);

        return err;

copy_finish:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
{
        /* Don't try to move pages (yet) */
        cs->move_pages = 0;

        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);

        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);

        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);

        case FUSE_NOTIFY_STORE:
                return fuse_notify_store(fc, size, cs);

        case FUSE_NOTIFY_RETRIEVE:
                return fuse_notify_retrieve(fc, size, cs);

        case FUSE_NOTIFY_DELETE:
                return fuse_notify_delete(fc, size, cs);

        default:
                fuse_copy_finish(cs);
                return -EINVAL;
        }
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
        struct fuse_req *req;

        list_for_each_entry(req, &fpq->processing, list) {
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
        return NULL;
}

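/*
 * Validate the reply size against the expected arguments and copy them
 * in; a shorter-than-expected reply is only accepted when the last
 * argument is variable-size, in which case it is shrunk to fit.
 */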
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
                                 struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_conn *fc = fud->fc;
        struct fuse_pqueue *fpq = &fud->pq;
        struct fuse_req *req;
        struct fuse_out_header oh;

        if (current_user_ns() != fc->user_ns)
                return -EIO;

        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;

        err = -EINVAL;
        if (oh.len != nbytes)
                goto err_finish;

        /*
         * Zero oh.unique indicates unsolicited notification message
         * and error contains notification code.
         */
        if (!oh.unique) {
                err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
                return err ? err : nbytes;
        }

        err = -EINVAL;
        if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;

        spin_lock(&fpq->lock);
        err = -ENOENT;
        if (!fpq->connected)
                goto err_unlock_pq;

        req = request_find(fpq, oh.unique);
        if (!req)
                goto err_unlock_pq;

        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
                spin_unlock(&fpq->lock);

                err = -EINVAL;
                if (nbytes != sizeof(struct fuse_out_header))
                        goto err_finish;

                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(&fc->iq, req);

                fuse_copy_finish(cs);
                return nbytes;
        }

        clear_bit(FR_SENT, &req->flags);
        list_move(&req->list, &fpq->io);
        req->out.h = oh;
        set_bit(FR_LOCKED, &req->flags);
        spin_unlock(&fpq->lock);
        cs->req = req;
        if (!req->out.page_replace)
                cs->move_pages = 0;

        err = copy_out_args(cs, &req->out, nbytes);
        fuse_copy_finish(cs);

        spin_lock(&fpq->lock);
        clear_bit(FR_LOCKED, &req->flags);
        if (!fpq->connected)
                err = -ENOENT;
        else if (err)
                req->out.h.error = -EIO;
        if (!test_bit(FR_PRIVATE, &req->flags))
                list_del_init(&req->list);
        spin_unlock(&fpq->lock);

        request_end(fc, req);

        return err ? err : nbytes;

err_unlock_pq:
        spin_unlock(&fpq->lock);
err_finish:
        fuse_copy_finish(cs);
        return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct fuse_copy_state cs;
        struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

        if (!fud)
                return -EPERM;

        if (!iter_is_iovec(from))
                return -EINVAL;

        fuse_copy_init(&cs, 0, from);

        return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

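/*
 * Splice a reply from a pipe into the device: collect enough pipe
 * buffers to cover @len, then feed them to fuse_dev_do_write().
 * SPLICE_F_MOVE allows page stealing via fuse_try_move_page().
 */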
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                                     struct file *out, loff_t *ppos,
                                     size_t len, unsigned int flags)
{
        unsigned nbuf;
        unsigned idx;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_dev *fud;
        size_t rem;
        ssize_t ret;

        fud = fuse_get_dev(out);
        if (!fud)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        pipe_lock(pipe);
        nbuf = 0;
        rem = 0;
        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

        ret = -EINVAL;
        if (rem < len) {
                pipe_unlock(pipe);
                goto out;
        }

        rem = len;
        while (rem) {
                struct pipe_buffer *ibuf;
                struct pipe_buffer *obuf;

                BUG_ON(nbuf >= pipe->buffers);
                BUG_ON(!pipe->nrbufs);
                ibuf = &pipe->bufs[pipe->curbuf];
                obuf = &bufs[nbuf];

                if (rem >= ibuf->len) {
                        *obuf = *ibuf;
                        ibuf->ops = NULL;
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
                        pipe_buf_get(pipe, ibuf);
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
                        ibuf->offset += obuf->len;
                        ibuf->len -= obuf->len;
                }
                nbuf++;
                rem -= obuf->len;
        }
        pipe_unlock(pipe);

        fuse_copy_init(&cs, 0, NULL);
        cs.pipebufs = bufs;
        cs.nr_segs = nbuf;
        cs.pipe = pipe;

        if (flags & SPLICE_F_MOVE)
                cs.move_pages = 1;

        ret = fuse_dev_do_write(fud, &cs, len);

        for (idx = 0; idx < nbuf; idx++)
                pipe_buf_release(pipe, &bufs[idx]);

out:
        kfree(bufs);
        return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_iqueue *fiq;
        struct fuse_dev *fud = fuse_get_dev(file);

        if (!fud)
                return POLLERR;

        fiq = &fud->fc->iq;
        poll_wait(file, &fiq->waitq, wait);

        spin_lock(&fiq->waitq.lock);
        if (!fiq->connected)
                mask = POLLERR;
        else if (request_pending(fiq))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fiq->waitq.lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Each request is ended with -ECONNABORTED.  Called without fc->lock held.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                clear_bit(FR_SENT, &req->flags);
                list_del_init(&req->list);
                request_end(fc, req);
        }
}

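/*
 * Wake up all files waiting in poll, so they notice that the
 * connection is going away.
 */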
static void end_polls(struct fuse_conn *fc)
{
        struct rb_node *p;

        p = rb_first(&fc->polled_files);

        while (p) {
                struct fuse_file *ff;
                ff = rb_entry(p, struct fuse_file, polled_node);
                wake_up_interruptible_all(&ff->poll_wait);

                p = rb_next(p);
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        struct fuse_iqueue *fiq = &fc->iq;

        spin_lock(&fc->lock);
        if (fc->connected) {
                struct fuse_dev *fud;
                struct fuse_req *req, *next;
                LIST_HEAD(to_end1);
                LIST_HEAD(to_end2);

                fc->connected = 0;
                fc->blocked = 0;
                fuse_set_initialized(fc);
                list_for_each_entry(fud, &fc->devices, entry) {
                        struct fuse_pqueue *fpq = &fud->pq;

                        spin_lock(&fpq->lock);
                        fpq->connected = 0;
                        list_for_each_entry_safe(req, next, &fpq->io, list) {
                                req->out.h.error = -ECONNABORTED;
                                spin_lock(&req->waitq.lock);
                                set_bit(FR_ABORTED, &req->flags);
                                if (!test_bit(FR_LOCKED, &req->flags)) {
                                        set_bit(FR_PRIVATE, &req->flags);
                                        __fuse_get_request(req);
                                        list_move(&req->list, &to_end1);
                                }
                                spin_unlock(&req->waitq.lock);
                        }
                        list_splice_init(&fpq->processing, &to_end2);
                        spin_unlock(&fpq->lock);
                }
                fc->max_background = UINT_MAX;
                flush_bg_queue(fc);

                spin_lock(&fiq->waitq.lock);
                fiq->connected = 0;
                list_splice_init(&fiq->pending, &to_end2);
                list_for_each_entry(req, &to_end2, list)
                        clear_bit(FR_PENDING, &req->flags);
                while (forget_pending(fiq))
                        kfree(dequeue_forget(fiq, 1, NULL));
                wake_up_all_locked(&fiq->waitq);
                spin_unlock(&fiq->waitq.lock);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);

                while (!list_empty(&to_end1)) {
                        req = list_first_entry(&to_end1, struct fuse_req, list);
                        list_del_init(&req->list);
                        request_end(fc, req);
                }
                end_requests(fc, &to_end2);
        } else {
                spin_unlock(&fc->lock);
        }
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

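/*
 * Release a fuse device: end its outstanding requests, and abort the
 * whole connection when the last device goes away.
 */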
int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_dev *fud = fuse_get_dev(file);

        if (fud) {
                struct fuse_conn *fc = fud->fc;
                struct fuse_pqueue *fpq = &fud->pq;
                LIST_HEAD(to_end);

                spin_lock(&fpq->lock);
                WARN_ON(!list_empty(&fpq->io));
                list_splice_init(&fpq->processing, &to_end);
                spin_unlock(&fpq->lock);

                end_requests(fc, &to_end);

                /* Are we the last open device? */
                if (atomic_dec_and_test(&fc->dev_count)) {
                        WARN_ON(fc->iq.fasync != NULL);
                        fuse_abort_conn(fc);
                }
                fuse_dev_free(fud);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_dev *fud = fuse_get_dev(file);

        if (!fud)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

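/*
 * Attach a new fuse_dev to @new, sharing the connection of an already
 * set up device; this backs the FUSE_DEV_IOC_CLONE ioctl.
 */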
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
        struct fuse_dev *fud;

        if (new->private_data)
                return -EINVAL;

        fud = fuse_dev_alloc(fc);
        if (!fud)
                return -ENOMEM;

        new->private_data = fud;
        atomic_inc(&fc->dev_count);

        return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
{
        int err = -ENOTTY;

        if (cmd == FUSE_DEV_IOC_CLONE) {
                int oldfd;

                err = -EFAULT;
                if (!get_user(oldfd, (__u32 __user *) arg)) {
                        struct file *old = fget(oldfd);

                        err = -EINVAL;
                        if (old) {
                                struct fuse_dev *fud = NULL;

                                /*
                                 * Check against file->f_op because CUSE
                                 * uses the same ioctl handler.
                                 */
                                if (old->f_op == file->f_op &&
                                    old->f_cred->user_ns == file->f_cred->user_ns)
                                        fud = fuse_get_dev(old);

                                if (fud) {
                                        mutex_lock(&fuse_mutex);
                                        err = fuse_device_clone(fud->fc, file);
                                        mutex_unlock(&fuse_mutex);
                                }
                                fput(old);
                        }
                }
        }
        return err;
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .open           = fuse_dev_open,
        .llseek         = no_llseek,
        .read_iter      = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .write_iter     = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
        .unlocked_ioctl = fuse_dev_ioctl,
        .compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}