/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)
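/*
 * Worked example (editor's illustration, not in the original source):
 * fuse_get_unique() below advances the counter by FUSE_REQ_ID_STEP, so
 * requests are numbered 2, 4, 6, ...  The INTERRUPT for request N is
 * sent with ID (N | FUSE_INT_REQ_BIT); e.g. the interrupt of request 4
 * carries ID 5.  Masking the low bit off an ID coming back from
 * userspace therefore recovers the original request ID.
 */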
static struct kmem_cache *fuse_req_cachep;
static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}
static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
					  struct fuse_page_desc **desc)
{
	struct page **pages;

	pages = kzalloc(npages * (sizeof(struct page *) +
				  sizeof(struct fuse_page_desc)), flags);
	*desc = (void *) pages + npages * sizeof(struct page *);

	return pages;
}
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);

	if (req) {
		struct page **pages = NULL;
		struct fuse_page_desc *page_descs = NULL;

		WARN_ON(npages > FUSE_MAX_MAX_PAGES);
		if (npages > FUSE_REQ_INLINE_PAGES) {
			pages = fuse_req_pages_alloc(npages, flags,
						     &page_descs);
			if (!pages) {
				kmem_cache_free(fuse_req_cachep, req);
				return NULL;
			}
		} else if (npages) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}
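/*
 * Note on the allocation strategy above (editor's summary): a request
 * touching at most FUSE_REQ_INLINE_PAGES pages reuses the page array
 * embedded in struct fuse_req, so small requests cost only the
 * kmem_cache allocation; only larger requests pay for the separate
 * fuse_req_pages_alloc() call.
 */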
struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);
struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}
static void fuse_req_pages_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages)
		kfree(req->pages);
}
bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
			    gfp_t flags)
{
	struct page **pages;
	struct fuse_page_desc *page_descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, req->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= req->max_pages);

	pages = fuse_req_pages_alloc(npages, flags, &page_descs);
	if (!pages)
		return false;

	memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
	memcpy(page_descs, req->page_descs,
	       sizeof(struct fuse_page_desc) * req->max_pages);
	fuse_req_pages_free(req);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;

	return true;
}
void fuse_request_free(struct fuse_req *req)
{
	fuse_req_pages_free(req);
	kmem_cache_free(fuse_req_cachep, req);
}
void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}
void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
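/*
 * In other words (editor's note): every allocation waits until the
 * INIT reply has set fc->initialized, and background allocations
 * additionally wait while fc->blocked is set, i.e. while
 * fc->max_background requests are already in flight; see request_end()
 * below, which clears fc->blocked again.
 */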
static void fuse_drop_waiting(struct fuse_conn *fc)
{
	if (fc->connected) {
		atomic_dec(&fc->num_waiting);
	} else if (atomic_dec_and_test(&fc->num_waiting)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;

	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t) -1) ||
		     req->in.h.gid == ((gid_t) -1))) {
		fuse_put_request(fc, req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	WARN_ON(req->max_pages);
	spin_lock(&fc->lock);
	memset(req, 0, sizeof(*req));
	fuse_request_init(req, NULL, NULL, 0);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
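/*
 * Because the low bit is masked off first, a request and its interrupt
 * hash to the same processing bucket: e.g. IDs 4 and 5 both hash as 4,
 * so a reply to either one can locate the original request in
 * fpq->processing[] (editor's illustration).
 */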
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
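/*
 * Editor's note: in.h.len computed above is the total size of the
 * message as the daemon will read it from the device: the fixed
 * fuse_in_header followed by the concatenated request arguments
 * summed up by len_args().
 */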
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		if (fc->num_background == fc->congestion_threshold && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
put_request:
	fuse_put_request(fc, req);
}
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
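/*
 * Editor's summary of the wait ladder above: first an interruptible
 * wait (any signal queues a FUSE_INTERRUPT if the daemon has already
 * read the request), then a killable wait (a fatal signal may still
 * dequeue a request that is pending and unread, failing it with
 * -EINTR), and finally an uninterruptible wait for requests that
 * reached userspace or were forced.
 */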
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
{
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		if (fc->num_background == fc->congestion_threshold && fc->sb) {
			set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	WARN_ON(!req->end);
	if (!fuse_request_queue_background(fc, req)) {
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}
void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};
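/*
 * Editor's note: one copy engine serves both I/O paths.  read(2) and
 * write(2) on the device feed an iov_iter through 'iter', while
 * splice(2) hands over pipe buffers through 'pipebufs'; a given
 * fuse_copy_state uses one of the two, never both.
 */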
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = 0;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}
static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
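/*
 * Editor's note on scheduling forgets vs. ordinary requests (see
 * fuse_dev_do_read() below): while both queues are busy,
 * fiq->forget_batch counts down from 16, serving forgets while it is
 * positive and ordinary requests once it goes negative, until it
 * reaches -8 and is reset.  The net effect is that bursts of forgets
 * and bursts of regular requests are interleaved, so neither queue can
 * starve the other.
 */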
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;
	unsigned int hash;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	if (!fiq->connected) {
		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);
	fuse_put_request(fc, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}
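/*
 * Editor's summary of the request lifecycle implemented above: a
 * request is picked off fiq->pending, parked on fpq->io while its
 * arguments are copied to userspace, and then either finished
 * (FORGET-style, aborted, or copy error) or moved to the
 * fpq->processing[] hash to wait for the reply.  The
 * smp_mb__after_atomic() pairs with request_wait_answer() so an
 * interrupt arriving during the copy is not lost.
 */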
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}
static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
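/*
 * Numeric example for the argvar case above (editor's illustration):
 * if the last output argument can hold up to 4096 bytes but the
 * daemon's reply is 40 bytes short of the maximum, diffsize is 40 and
 * lastarg->size is trimmed accordingly; a reply larger than the
 * maximum, or one short on a fixed-size argument, is rejected with
 * -EINVAL.
 */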
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header)) {
			fuse_put_request(fc, req);
			goto err_finish;
		}

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);
		fuse_put_request(fc, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fc->aborted = is_abort;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(fc, &to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
void fuse_wait_aborted(struct fuse_conn *fc)
{
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}
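/*
 * Editor's note: fc->num_waiting counts requests that have been
 * allocated but not yet finished, so the wait above guarantees that no
 * request is still in flight once an abort has run;
 * fuse_drop_waiting() performs the matching wake-up when the counter
 * reaches zero on a disconnected fc.
 */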
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(fc, &to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc, false);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
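/*
 * Editor's note: these operations back the /dev/fuse character device
 * as well as any additional devices created through the
 * FUSE_DEV_IOC_CLONE ioctl; CUSE reuses fuse_dev_ioctl(), which is why
 * the handler checks file->f_op before trusting private_data.
 */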
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}