/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/aio.h>
#include <linux/falloc.h>
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (ff->fc->no_open) {
			/*
			 * Drop the release request when client does not
			 * implement 'open'
			 */
			req->background = 0;
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else if (sync) {
			req->background = 0;
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			req->background = 1;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
	if (!fc->no_open || isdir) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS || isdir) {
			fuse_file_free(ff);
			return err;
		} else {
			fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fc->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fc->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;
	bool lock_inode = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (lock_inode)
		mutex_lock(&inode->i_mutex);

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);

	if (!err)
		fuse_finish_open(inode, file);

	if (lock_inode)
		mutex_unlock(&inode->i_mutex);

	return err;
}
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}
void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	ff->reserved_req->background = 0;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
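
/*
 * (The loop above is plain XTEA encryption of the 64-bit value held in
 * (v0, v1): 32 rounds using the standard XTEA round constant 0x9E3779B9,
 * with fc->scramble_key as the 128-bit key.)
 */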
/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (idx_from < curr_index + req->num_pages &&
		    curr_index <= idx_to) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}
/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
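
/*
 * (Roughly: fuse_set_nowrite() drives fi->writectr negative, blocking
 * further queued writepage requests, and waits for in-flight ones to
 * drain; fuse_release_nowrite() restores writectr and flushes anything
 * queued in the meantime.)
 */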
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	fuse_sync_writes(inode);
	mutex_unlock(&inode->i_mutex);

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);
	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		goto out;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	spin_unlock(&io->lock);

	if (!left) {
		long res;

		if (io->err)
			res = io->err;
		else if (io->bytes >= 0 && io->write)
			res = -EIO;
		else {
			res = io->bytes < 0 ? io->size : io->bytes;

			if (!is_sync_kiocb(io->iocb)) {
				struct inode *inode = file_inode(io->iocb->ki_filp);
				struct fuse_conn *fc = get_fuse_conn(inode);
				struct fuse_inode *fi = get_fuse_inode(inode);

				spin_lock(&fc->lock);
				fi->attr_version = ++fc->attr_version;
				spin_unlock(&fc->lock);
			}
		}

		aio_complete(io->iocb, res, 0);
		kfree(io);
	}
}
static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, !io->write);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}
static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
		size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	__fuse_get_request(req);
	fuse_request_send_background(fc, req);

	return num_bytes;
}
static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->out.args[0].size;
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
			    u64 attr_ver)
{
	size_t num_read = req->out.args[0].size;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole are in page cache,
		 * but have not reached the client fs yet. So, the hole is not
		 * present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_CACHE_SHIFT;
		size_t off = num_read & (PAGE_CACHE_SIZE - 1);

		for (i = start_idx; i < req->num_pages; i++) {
			zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct fuse_io_priv io = { .async = 0, .file = file };
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, &io, pos, count, NULL);
	err = req->out.h.error;

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_short_read(req, inode, attr_ver);

		SetPageUptodate(page);
	}

	fuse_put_request(fc, req);

	return err;
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count)
			fuse_short_read(req, inode, req->misc.read.attr_ver);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}
bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fc->lock);

	return ret;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;
	struct fuse_io_priv io = { .async = 0, .file = file };

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, &io, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
			       struct address_space *mapping,
			       struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
		     (pos >> PAGE_CACHE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}
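
/*
 * Worked example (illustrative, assuming 4K pages): pos = 1000 and
 * len = 5000 span bytes 1000..5999, i.e. page indexes 0 and 1, so the
 * expression gives ((5999 >> 12) - (1000 >> 12)) + 1 = 2 pages.
 */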
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	size_t ocount = 0;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, NULL, file, NULL);
		if (err)
			return err;

		return generic_file_aio_write(iocb, iov, nr_segs, pos);
	}

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (file->f_flags & O_DIRECT) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
						    count, ocount);
		if (written < 0 || written == count)
			goto out;

		pos += written;
		count -= written;

		iov_iter_init(&i, iov, nr_segs, count, written);
		written_buffered = fuse_perform_write(file, mapping, &i, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		iov_iter_init(&i, iov, nr_segs, count, 0);
		written = fuse_perform_write(file, mapping, &i, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
	}
out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static inline void fuse_page_descs_length_init(struct fuse_req *req,
		unsigned index, unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}
static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}
static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;

		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		unsigned long user_addr = fuse_get_user_addr(ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
		int ret;

		unsigned n = req->max_pages - req->num_pages;
		frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);

		npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		npages = clamp(npages, 1U, n);

		ret = get_user_pages_fast(user_addr, npages, !write,
					  &req->pages[req->num_pages]);
		if (ret < 0)
			return ret;

		npages = ret;
		frag_size = min_t(size_t, frag_size,
				  (npages << PAGE_SHIFT) - offset);
		iov_iter_advance(ii, frag_size);

		req->page_descs[req->num_pages].offset = offset;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(npages << PAGE_SHIFT) - offset - frag_size;

		nbytes += frag_size;
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return 0;
}
static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	struct iov_iter ii = *ii_p;
	int npages = 0;

	while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
		unsigned long user_addr = fuse_get_user_addr(&ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = iov_iter_single_seg_count(&ii);

		npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iov_iter_advance(&ii, frag_size);
	}

	return min(npages, FUSE_MAX_PAGES_PER_REQ);
}
ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
		       unsigned long nr_segs, size_t count, loff_t *ppos,
		       int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->file;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	ssize_t res = 0;
	struct fuse_req *req;
	struct iov_iter ii;

	iov_iter_init(&ii, iov, nr_segs, count, 0);

	if (io->async)
		req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
	else
		req = fuse_get_req(fc, fuse_iter_npages(&ii));
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			mutex_lock(&inode->i_mutex);
		fuse_sync_writes(inode);
		if (!write)
			mutex_unlock(&inode->i_mutex);
	}

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, &ii, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, io, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, io, pos, nbytes, owner);

		if (!io->async)
			fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			if (io->async)
				req = fuse_get_req_for_background(fc,
					fuse_iter_npages(&ii));
			else
				req = fuse_get_req(fc, fuse_iter_npages(&ii));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  const struct iovec *iov,
				  unsigned long nr_segs, loff_t *ppos,
				  size_t count)
{
	ssize_t res;
	struct file *file = io->file;
	struct inode *inode = file_inode(file);

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct fuse_io_priv io = { .async = 0, .file = file };
	struct iovec iov = { .iov_base = buf, .iov_len = count };
	return __fuse_direct_read(&io, &iov, 1, ppos, count);
}
static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
				   const struct iovec *iov,
				   unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = io->file;
	struct inode *inode = file_inode(file);
	size_t count = iov_length(iov, nr_segs);
	ssize_t res;

	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(io, iov, nr_segs, count, ppos,
				     FUSE_DIO_WRITE);

	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
	struct inode *inode = file_inode(file);
	ssize_t res;
	struct fuse_io_priv io = { .async = 0, .file = file };

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = __fuse_direct_write(&io, &iov, 1, ppos);
	if (res > 0)
		fuse_write_update_size(inode, *ppos);
	mutex_unlock(&inode->i_mutex);

	return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	for (i = 0; i < req->num_pages; i++)
		__free_page(req->pages[i]);

	if (req->ff)
		fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
	int i;

	list_del(&req->writepages_entry);
	for (i = 0; i < req->num_pages; i++) {
		dec_bdi_stat(bdi, BDI_WRITEBACK);
		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
		bdi_writeout_inc(bdi);
	}
	wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
				loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	__u64 data_size = req->num_pages * PAGE_CACHE_SIZE;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}
/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	size_t crop = i_size_read(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req, crop);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	while (req->misc.write.next) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &req->misc.write.in;
		struct fuse_req *next = req->misc.write.next;
		req->misc.write.next = next->misc.write.next;
		next->misc.write.next = NULL;
		next->ff = fuse_file_get(req->ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fc->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fc->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);
	WARN_ON(!ff);
	return ff;
}
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, 0);

	return err;
}
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs(1);
	if (!req)
		goto err;

	req->background = 1; /* writeback always goes to bg_queue */
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	req->ff = fuse_write_file_get(fc, fi);
	if (!req->ff)
		goto err_free;

	fuse_write_fill(req, req->ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->misc.write.next = NULL;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_descs[0].offset = 0;
	req->page_descs[0].length = PAGE_SIZE;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	end_page_writeback(page);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		return 0;
	}

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}
struct fuse_fill_wb_data {
	struct fuse_req *req;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
};
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = req->num_pages;
	int i;

	req->ff = fuse_file_get(data->ff);
	spin_lock(&fc->lock);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}
static bool fuse_writepage_in_flight(struct fuse_req *new_req,
				     struct page *page)
{
	struct fuse_conn *fc = get_fuse_conn(new_req->inode);
	struct fuse_inode *fi = get_fuse_inode(new_req->inode);
	struct fuse_req *tmp;
	struct fuse_req *old_req;
	bool found = false;
	pgoff_t curr_index;

	BUG_ON(new_req->num_pages != 0);

	spin_lock(&fc->lock);
	list_del(&new_req->writepages_entry);
	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
		BUG_ON(old_req->inode != new_req->inode);
		curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index <= page->index &&
		    page->index < curr_index + old_req->num_pages) {
			found = true;
			break;
		}
	}
	if (!found) {
		list_add(&new_req->writepages_entry, &fi->writepages);
		goto out_unlock;
	}

	new_req->num_pages = 1;
	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
		BUG_ON(tmp->inode != new_req->inode);
		curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (tmp->num_pages == 1 &&
		    curr_index == page->index) {
			old_req = tmp;
		}
	}

	if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
					old_req->state == FUSE_REQ_PENDING)) {
		struct backing_dev_info *bdi = page->mapping->backing_dev_info;

		copy_highpage(old_req->pages[0], page);
		spin_unlock(&fc->lock);

		dec_bdi_stat(bdi, BDI_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK_TEMP);
		bdi_writeout_inc(bdi);
		fuse_writepage_free(fc, new_req);
		fuse_request_free(new_req);
		goto out;
	} else {
		new_req->misc.write.next = old_req->misc.write.next;
		old_req->misc.write.next = new_req;
	}
out_unlock:
	spin_unlock(&fc->lock);
out:
	return found;
}
static int fuse_writepages_fill(struct page *page,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	bool is_writeback;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
		if (!data->ff)
			goto out_unlock;
	}

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

	if (req && req->num_pages &&
	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
		data->req = NULL;
	}
	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment req->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->req == NULL) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		err = -ENOMEM;
		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
		if (!req) {
			__free_page(tmp_page);
			goto out_unlock;
		}

		fuse_write_fill(req, data->ff, page_offset(page), 0);
		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
		req->misc.write.next = NULL;
		req->in.argpages = 1;
		req->background = 1;
		req->inode = inode;
		req->end = fuse_writepage_end;
		req->num_pages = 0;

		spin_lock(&fc->lock);
		list_add(&req->writepages_entry, &fi->writepages);
		spin_unlock(&fc->lock);

		data->req = req;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	req->pages[req->num_pages] = tmp_page;
	req->page_descs[req->num_pages].offset = 0;
	req->page_descs[req->num_pages].length = PAGE_SIZE;

	inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (is_writeback && fuse_writepage_in_flight(req, page)) {
		end_page_writeback(page);
		data->req = NULL;
		goto out_unlock;
	}
	data->orig_pages[req->num_pages] = page;

	/*
	 * Protected by fc->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fc->lock);
	req->num_pages++;
	spin_unlock(&fc->lock);

out_unlock:
	unlock_page(page);

	return err;
}
static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.inode = inode;
	data.req = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kzalloc(sizeof(struct page *) *
				  FUSE_MAX_PAGES_PER_REQ,
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.req) {
		/* Ignore errors if we can write at least one page */
		BUG_ON(!data.req->num_pages);
		fuse_writepages_send(&data);
		err = 0;
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}
/*
 * It would be worthwhile to make sure that space is reserved on disk for
 * the write, but how to implement it without killing performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file->f_dentry->d_inode);
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_CACHE_MASK)) {
		size_t off = pos & ~PAGE_CACHE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	page_cache_release(page);
error:
	return err;
}
*file
, struct address_space
*mapping
,
2062 loff_t pos
, unsigned len
, unsigned copied
,
2063 struct page
*page
, void *fsdata
)
2065 struct inode
*inode
= page
->mapping
->host
;
2067 if (!PageUptodate(page
)) {
2068 /* Zero any unwritten bytes at the end of the page */
2069 size_t endoff
= (pos
+ copied
) & ~PAGE_CACHE_MASK
;
2071 zero_user_segment(page
, endoff
, PAGE_CACHE_SIZE
);
2072 SetPageUptodate(page
);
2075 fuse_write_update_size(inode
, pos
+ copied
);
2076 set_page_dirty(page
);
2078 page_cache_release(page
);
static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}
/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	fuse_put_request(fc, req);
	return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
	if (whence == SEEK_CUR || whence == SEEK_SET)
		return generic_file_llseek(file, offset, whence);

	mutex_lock(&inode->i_mutex);
	retval = fuse_update_attributes(inode, NULL, file, NULL);
	if (!retval)
		retval = generic_file_llseek(file, offset, whence);
	mutex_unlock(&inode->i_mutex);

	return retval;
}
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
			unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}
/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
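/*
 * (A compat_iovec is two 32-bit fields, 8 bytes per entry, while the
 * native 64-bit struct iovec is 16 bytes, so the reply size identifies
 * the layout unambiguously.)
 */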
2415 static int fuse_copy_ioctl_iovec_old(struct iovec
*dst
, void *src
,
2416 size_t transferred
, unsigned count
,
2419 #ifdef CONFIG_COMPAT
2420 if (count
* sizeof(struct compat_iovec
) == transferred
) {
2421 struct compat_iovec
*ciov
= src
;
2425 * With this interface a 32bit server cannot support
2426 * non-compat (i.e. ones coming from 64bit apps) ioctl
2432 for (i
= 0; i
< count
; i
++) {
2433 dst
[i
].iov_base
= compat_ptr(ciov
[i
].iov_base
);
2434 dst
[i
].iov_len
= ciov
[i
].iov_len
;
2440 if (count
* sizeof(struct iovec
) != transferred
)
2443 memcpy(dst
, src
, transferred
);
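
/*
 * Worked example of the size-based disambiguation above (not part of
 * the original source; assumes the usual LP64/ILP32 layouts): struct
 * iovec is 16 bytes on a 64bit kernel (8-byte pointer plus 8-byte
 * size_t) while struct compat_iovec is 8 bytes (4 + 4).  A 32bit
 * server replying with three iovecs transfers 3 * 8 == 24 bytes,
 * which can never equal 3 * 16 == 48 bytes from a 64bit server, so
 * "count * sizeof(struct compat_iovec) == transferred" identifies the
 * old 32bit ABI unambiguously for any count > 0.
 */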
/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}
static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}
/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in and
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the second
 * invocation, it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov
 * array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by client always fits in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;
	fuse_page_descs_length_init(req, 0, req->num_pages);

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
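
/*
 * Illustrative server-side sketch of the retry protocol described
 * before fuse_do_ioctl() (not part of the original source).  It
 * assumes the libfuse low-level API (fuse_reply_ioctl_retry() and
 * fuse_reply_ioctl()); MYFS_IOC_A and struct a are the hypothetical
 * ioctl and structure from that comment:
 *
 *	static void myfs_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd,
 *			       void *arg, struct fuse_file_info *fi,
 *			       unsigned flags, const void *in_buf,
 *			       size_t in_bufsz, size_t out_bufsz)
 *	{
 *		if (cmd != MYFS_IOC_A) {
 *			fuse_reply_err(req, EINVAL);
 *		} else if (in_bufsz < sizeof(struct a)) {
 *			struct iovec iov = { arg, sizeof(struct a) };
 *
 *			fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
 *		} else {
 *			const struct a *a = in_buf;
 *
 *			handle(a);
 *			fuse_reply_ioctl(req, 0, NULL, 0);
 *		}
 *	}
 *
 * handle() stands in for fetching a->buf/a->buflen with one more
 * retry round, exactly as the comment walks through.
 */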
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *uninitialized_var(parent);

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = (__u32)poll_requested_events(wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
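
/*
 * Illustrative server-side sketch of the poll/notify handshake (not
 * part of the original source), assuming the libfuse low-level API.
 * The kernel side above registers the file and waits; the server
 * stores the poll handle when FUSE_POLL carries
 * FUSE_POLL_SCHEDULE_NOTIFY, and fires FUSE_NOTIFY_POLL later:
 *
 *	static struct fuse_pollhandle *saved_ph;
 *
 *	static void myfs_poll(fuse_req_t req, fuse_ino_t ino,
 *			      struct fuse_file_info *fi,
 *			      struct fuse_pollhandle *ph)
 *	{
 *		if (ph)
 *			saved_ph = ph;
 *		fuse_reply_poll(req, my_data_ready() ? POLLIN : 0);
 *	}
 *
 * and later, when new data arrives:
 *
 *	fuse_lowlevel_notify_poll(saved_ph);
 *	fuse_pollhandle_destroy(saved_ph);
 *
 * my_data_ready() is hypothetical; a real server must also destroy
 * any previously saved handle before overwriting it.
 */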
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(inode, &attr, file);
}

static inline loff_t fuse_round_up(loff_t off)
{
	return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}
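
/*
 * Worked example (not part of the original source): with
 * FUSE_MAX_PAGES_PER_REQ == 32 and the usual 4KiB pages
 * (PAGE_SHIFT == 12), the granularity is 32 << 12 == 128KiB, i.e. one
 * maximally-sized request, so fuse_round_up(5000) == 131072 and
 * fuse_round_up(200000) == 262144.
 */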
static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t offset, unsigned long nr_segs)
{
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	bool async_dio = ff->fc->async_dio;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_length(iov, nr_segs);
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((rw == READ) && (offset > i_size))
		return 0;

	/* optimization for short read */
	if (async_dio && rw != WRITE && offset + count > i_size) {
		if (offset >= i_size)
			return 0;
		count = min_t(loff_t, count, fuse_round_up(i_size - offset));
	}

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (rw == WRITE);
	io->err = 0;
	io->file = file;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = async_dio;
	io->iocb = iocb;

	/*
	 * We cannot asynchronously extend the size of a file. We have no method
	 * to wait on real async I/O requests, so we must submit this request
	 * synchronously.
	 */
	if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
		io->async = false;

	if (rw == WRITE)
		ret = __fuse_direct_write(io, iov, nr_segs, &pos);
	else
		ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);

	if (io->async) {
		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		ret = wait_on_sync_kiocb(iocb);
	} else {
		kfree(io);
	}

	if (rw == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file->f_inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req;
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			  (mode & FALLOC_FL_PUNCH_HOLE);

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	if (lock_inode) {
		mutex_lock(&inode->i_mutex);
		if (mode & FALLOC_FL_PUNCH_HOLE) {
			loff_t endbyte = offset + length - 1;
			err = filemap_write_and_wait_range(inode->i_mapping,
							   offset, endbyte);
			if (err)
				goto out;

			fuse_sync_writes(inode);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->in.h.opcode = FUSE_FALLOCATE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	fuse_put_request(fc, req);

	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		bool changed = fuse_write_update_size(inode, offset + length);

		if (changed && fc->writeback_cache)
			file_update_time(file);
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (lock_inode)
		mutex_unlock(&inode->i_mutex);

	return err;
}
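
/*
 * Illustrative server-side counterpart (not part of the original
 * source): with the libfuse high-level API (2.9+), FUSE_FALLOCATE
 * shows up as the .fallocate operation.  A minimal sketch backed by
 * posix_fallocate(), which cannot punch holes:
 *
 *	static int myfs_fallocate(const char *path, int mode, off_t offset,
 *				  off_t length, struct fuse_file_info *fi)
 *	{
 *		if (mode)
 *			return -EOPNOTSUPP;
 *		return -posix_fallocate(fi->fh, offset, length);
 *	}
 *
 * Replying -ENOSYS instead would make the kernel set fc->no_fallocate
 * and fail every later fallocate() with EOPNOTSUPP, as handled above.
 */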
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
};
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	/* no splice_read */
};
static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}