/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;
static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (req->in.h.uid == ((uid_t) -1) || req->in.h.gid == ((gid_t) -1)) {
		fuse_put_request(fc, req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
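
/*
 * A minimal sketch of the request lifecycle these helpers expose, in the
 * style of callers elsewhere in fs/fuse (the opcode and the in-argument
 * are illustrative, not tied to a particular operation):
 *
 *	struct fuse_req *req = fuse_get_req(fc, 0);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_FLUSH;
 *	req->in.h.nodeid = nodeid;
 *	req->in.numargs = 1;
 *	req->in.args[0].size = sizeof(inarg);
 *	req->in.args[0].value = &inarg;
 *	fuse_request_send(fc, req);	// blocks until the daemon replies
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);	// drop the caller's reference
 */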
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}
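
/*
 * Callers are expected to have allocated the fuse_forget_link up front
 * (see fuse_alloc_forget()), so queueing a forget cannot fail even under
 * memory pressure.  A typical call site, sketched from the inode eviction
 * path:
 *
 *	fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
 *	fi->forget = NULL;
 */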
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
put_request:
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
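
/*
 * On the wire, the interrupt handling above becomes a FUSE_INTERRUPT
 * request carrying the unique ID of the original request (see
 * fuse_read_interrupt() below).  A hedged sketch of the daemon side:
 *
 *	struct fuse_in_header *in = (struct fuse_in_header *)buf;
 *	if (in->opcode == FUSE_INTERRUPT) {
 *		struct fuse_interrupt_in *arg = (void *)(in + 1);
 *		// arg->unique names the request to abort; the daemon may
 *		// answer that request with -EINTR, or reply -EAGAIN to the
 *		// interrupt itself if it does not know the request yet.
 *	}
 */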
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
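
/*
 * Sketch of a typical fuse_simple_request() caller, patterned on the
 * getattr path in fs/fuse/dir.c (FUSE_ARGS() is the zero-initializer
 * macro from fuse_i.h; inarg/outarg stand for the operation's structs):
 *
 *	FUSE_ARGS(args);
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */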
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);

	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = 0;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
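
/*
 * What the daemon sees for the two variants above (struct layouts from
 * uapi/linux/fuse.h); neither message expects a reply, so writing one
 * back would just fail the unique lookup in fuse_dev_do_write():
 *
 *	FUSE_FORGET:	   fuse_in_header + fuse_forget_in
 *	FUSE_BATCH_FORGET: fuse_in_header + fuse_batch_forget_in
 *			   + arg.count consecutive fuse_forget_one entries
 */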
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

	if (current_user_ns() != fc->user_ns)
		return -EIO;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;

	if (task_active_pid_ns(current) != fc->pid_ns) {
		rcu_read_lock();
		in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
		rcu_read_unlock();
	}

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}
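
/*
 * Seen from userspace, fuse_dev_do_read() is the producer side of a
 * plain read(2) on /dev/fuse.  A minimal daemon loop, sketched without
 * libfuse (FUSE_MIN_READ_BUFFER is from uapi/linux/fuse.h; real servers
 * size the buffer from the max_write negotiated at INIT time):
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	for (;;) {
 *		ssize_t n = read(devfd, buf, sizeof(buf));
 *		if (n < 0 && errno == EINTR)
 *			continue;
 *		struct fuse_in_header *in = (struct fuse_in_header *)buf;
 *		// in->opcode, in->nodeid and in->unique describe the
 *		// request; a reply must echo in->unique (see
 *		// fuse_dev_do_write() below).
 *		handle_request(in, buf + sizeof(*in), n - sizeof(*in));
 *		// handle_request() is the daemon's own dispatcher
 *	}
 */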
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
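
/*
 * Userspace pairs the splice path above with splice(2) to move request
 * payloads without copying through a user buffer (sketch; sizes and
 * error handling elided):
 *
 *	// request side: /dev/fuse -> pipe, then pipe -> destination fd
 *	splice(devfd, NULL, pipefd[1], NULL, bufsize, SPLICE_F_MOVE);
 *	splice(pipefd[0], NULL, destfd, NULL, bufsize, SPLICE_F_MOVE);
 */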
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
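
/*
 * Notifications travel through the same write path as replies: the
 * daemon writes a fuse_out_header with unique == 0 and the notification
 * code in the error field (see fuse_dev_do_write() below).  A sketch
 * for FUSE_NOTIFY_INVAL_ENTRY:
 *
 *	struct fuse_notify_inval_entry_out outarg = {
 *		.parent = parent_nodeid,
 *		.namelen = strlen(name),
 *	};
 *	struct fuse_out_header oh = {
 *		.len = sizeof(oh) + sizeof(outarg) + strlen(name) + 1,
 *		.error = FUSE_NOTIFY_INVAL_ENTRY,
 *		.unique = 0,
 *	};
 *	// writev() oh, outarg and the NUL-terminated name in one call;
 *	// fuse_notify_inval_entry() checks exactly this size.
 */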
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (current_user_ns() != fc->user_ns)
		return -EINVAL;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}
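
/*
 * The matching daemon side: a reply is a single write of fuse_out_header
 * plus payload, where oh.len must equal the byte count written and
 * oh.unique echoes the request header.  A hedged sketch:
 *
 *	struct fuse_out_header oh = {
 *		.len = sizeof(oh) + payload_len,
 *		.error = 0,		// negative errno on failure
 *		.unique = in_unique,	// taken from the request
 *	};
 *	struct iovec iov[2] = {
 *		{ .iov_base = &oh,     .iov_len = sizeof(oh) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	writev(devfd, iov, 2);
 */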
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}
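
/*
 * Daemon-side counterpart of the poll support above (sketch): wait for
 * POLLIN before a non-blocking read of the device.
 *
 *	struct pollfd pfd = { .fd = devfd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(devfd, buf, sizeof(buf));
 */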
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		list_splice_init(&fpq->processing, &to_end);
		spin_unlock(&fpq->lock);

		end_requests(fc, &to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}
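
/*
 * Userspace view of FUSE_DEV_IOC_CLONE, as used by multi-threaded
 * servers to get per-worker queues (sketch; error handling elided, and
 * session_fd stands for an fd that is already mounted):
 *
 *	int clonefd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	uint32_t oldfd = session_fd;
 *	ioctl(clonefd, FUSE_DEV_IOC_CLONE, &oldfd);
 *	// clonefd now dequeues requests via its own fuse_pqueue
 */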
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}