/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.in.h.opcode = opcode;
	args.in.h.nodeid = nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(*outargp);
	args.out.args[0].value = outargp;

	return fuse_simple_request(fc, &args);
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->misc.release.inode);
}

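/*
 * Drop a reference to the fuse_file.  On the last put the RELEASE
 * request is sent: synchronously if @sync is set (waiting for the
 * reply), otherwise as a background request whose ->end callback
 * drops the pinned inode.
 */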
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (ff->fc->no_open) {
			/*
			 * Drop the release request when client does not
			 * implement 'open'
			 */
			__clear_bit(FR_BACKGROUND, &req->flags);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else if (sync) {
			__set_bit(FR_FORCE, &req->flags);
			__clear_bit(FR_BACKGROUND, &req->flags);
			fuse_request_send(ff->fc, req);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			__set_bit(FR_BACKGROUND, &req->flags);
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

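/*
 * Open a file or directory.  If the server returns ENOSYS for OPEN,
 * remember that in fc->no_open and fall back to a default file handle
 * with FOPEN_KEEP_CACHE set; OPENDIR has no such fallback.
 */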
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
	if (!fc->no_open || isdir) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;

		} else if (err != -ENOSYS || isdir) {
			fuse_file_free(ff);
			return err;
		} else {
			fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = ff;

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * The file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fc->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fc->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;
	bool lock_inode = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (lock_inode)
		inode_lock(inode);

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);

	if (!err)
		fuse_finish_open(inode, file);

	if (lock_inode)
		inode_unlock(inode);

	return err;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req = ff->reserved_req;

	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	req->misc.release.inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount:
	 * synchronous RELEASE is allowed (and desirable) in this case,
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
	 * synchronous, we are fine with not doing igrab() here
	 */
	fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.  (0x9E3779B9 is the standard
 * TEA/XTEA key-schedule constant, derived from the golden ratio.)
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
		if (idx_from < curr_index + req->num_pages &&
		    curr_index <= idx_to) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

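/*
 * Called from filp_close() on every close of a file descriptor.  Dirty
 * data is written back first, then a forced FUSE_FLUSH request carrying
 * the lock owner is sent; the nofail request allocation means the flush
 * itself cannot fail for lack of memory.
 */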
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__set_bit(FR_FORCE, &req->flags);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to implementation of fuse writeback
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		goto out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (should_dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fc->lock);
			fi->attr_version = ++fc->attr_version;
			spin_unlock(&fc->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, io->should_dirty);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}

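/*
 * Account one more in-flight request on the async io: bump io->reqs
 * and the io refcount (dropped again in fuse_aio_complete()), take an
 * extra reference on the request so the caller can still inspect it
 * after submission, and queue the request in the background.
 */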
static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
				  size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	__fuse_get_request(req);
	fuse_request_send_background(fc, req);

	return num_bytes;
}

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static void fuse_short_read(struct fuse_req *req, struct inode *inode,
			    u64 attr_ver)
{
	size_t num_read = req->out.args[0].size;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole is in the page
		 * cache, but has not reached the client fs yet, so the hole
		 * is not present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_SHIFT;
		size_t off = num_read & (PAGE_SIZE - 1);

		for (i = start_idx; i < req->num_pages; i++) {
			zero_user_segment(req->pages[i], off, PAGE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct kiocb iocb;
	struct fuse_io_priv io;
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_SIZE;
	u64 attr_ver;
	int err;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	init_sync_kiocb(&iocb, file);
	io = (struct fuse_io_priv) FUSE_IO_PRIV_SYNC(&iocb);
	num_read = fuse_send_read(req, &io, pos, count, NULL);
	err = req->out.h.error;

	if (!err) {
		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_short_read(req, inode, attr_ver);

		SetPageUptodate(page);
	}

	fuse_put_request(fc, req);

	return err;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count)
			fuse_short_read(req, inode, req->misc.read.attr_ver);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

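/*
 * Called for each page by read_cache_pages().  Contiguous pages are
 * batched into a single request; the request is sent when it is full,
 * when fc->max_read would be exceeded, or when a non-contiguous page
 * arrives.
 */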
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	get_page(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, iocb->ki_filp);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (iocb->ki_flags & IOCB_DSYNC)
		inarg->flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		inarg->flags |= O_SYNC;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fc->lock);

	return ret;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, &io, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_SIZE - offset)
			count -= PAGE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		put_page(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		iov_iter_advance(ii, tmp);
		if (!tmp) {
			unlock_page(page);
			put_page(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

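/*
 * Number of page-cache pages spanned by the range [pos, pos + len),
 * capped at the per-request maximum.
 */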
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}

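/*
 * Buffered write: copy user data into freshly grabbed page-cache pages
 * and send them in FUSE_WRITE requests of at most fc->max_write bytes.
 * FUSE_I_SIZE_UNSTABLE is set while a size-extending write is in
 * flight, so that size updates from read replies (see
 * fuse_read_update_size()) are ignored in the meantime.
 */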
static ssize_t fuse_perform_write(struct kiocb *iocb,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, iocb, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file);
		if (err)
			return err;

		return generic_file_write_iter(iocb, from);
	}

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(iocb, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
					       unsigned index, unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

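/*
 * Pin the user pages backing the iov_iter and pack them into
 * req->pages[], recording per-page offset/length descriptors.  On
 * return *nbytesp holds the number of bytes actually mapped, which may
 * be less than requested if the request ran out of page slots.
 */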
static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (ii->type & ITER_KVEC) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		size_t start;
		ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
					 *nbytesp - nbytes,
					 req->max_pages - req->num_pages,
					 &start);
		if (ret < 0)
			break;

		iov_iter_advance(ii, ret);
		nbytes += ret;

		ret += start;
		npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;

		req->page_descs[req->num_pages].offset = start;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
}

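/*
 * Common direct I/O path for reads and writes.  The iov_iter is cut
 * into requests of at most fc->max_read/max_write bytes, sent either
 * synchronously or, for async io, in the background.  Cached writeback
 * overlapping the range is flushed first (not applicable to CUSE,
 * which has no inode).
 */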
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	struct fuse_req *req;
	int err = 0;

	if (io->async)
		req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
	else
		req = fuse_get_req(fc, fuse_iter_npages(iter));
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	io->should_dirty = !write && iter_is_iovec(iter);
	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		err = fuse_get_user_pages(req, iter, &nbytes, write);
		if (err && !nbytes)
			break;

		if (write)
			nres = fuse_send_write(req, io, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, io, pos, nbytes, owner);

		if (!io->async)
			fuse_release_user_pages(req, io->should_dirty);
		if (req->out.h.error) {
			err = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = 0;
			err = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			if (io->async)
				req = fuse_get_req_for_background(fc,
					fuse_iter_npages(iter));
			else
				req = fuse_get_req(fc, fuse_iter_npages(iter));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	return __fuse_direct_read(&io, to, &iocb->ki_pos);
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	inode_lock(inode);
	res = generic_write_checks(iocb, from);
	if (res > 0)
		res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	fuse_invalidate_attr(inode);
	if (res > 0)
		fuse_write_update_size(inode, iocb->ki_pos);
	inode_unlock(inode);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	for (i = 0; i < req->num_pages; i++)
		__free_page(req->pages[i]);

	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	list_del(&req->writepages_entry);
	for (i = 0; i < req->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
				loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	__u64 data_size = req->num_pages * PAGE_SIZE;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	size_t crop = i_size_read(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req, crop);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	while (req->misc.write.next) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &req->misc.write.in;
		struct fuse_req *next = req->misc.write.next;
		req->misc.write.next = next->misc.write.next;
		next->misc.write.next = NULL;
		next->ff = fuse_file_get(req->ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite. Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests. This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section. The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests. Further primary requests are blocked by negative
		 * writectr. Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fc->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fc->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);
	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, 0);

	return err;
}

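/*
 * Write out a single locked page.  The page contents are copied into a
 * newly allocated temporary page, so the original can be unlocked and
 * reused immediately; the copy is accounted as NR_WRITEBACK_TEMP and
 * the request queued on fi->queued_writes for fuse_flush_writepages().
 */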
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs(1);
	if (!req)
		goto err;

	/* writeback always goes to bg_queue */
	__set_bit(FR_BACKGROUND, &req->flags);
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	req->ff = fuse_write_file_get(fc, fi);
	if (!req->ff)
		goto err_nofile;

	fuse_write_fill(req, req->ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->misc.write.next = NULL;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_descs[0].offset = 0;
	req->page_descs[0].length = PAGE_SIZE;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	fuse_request_free(req);
err:
	mapping_set_error(page->mapping, error);
	end_page_writeback(page);
	return error;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends. We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		return 0;
	}

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

struct fuse_fill_wb_data {
	struct fuse_req *req;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
};

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = req->num_pages;
	int i;

	req->ff = fuse_file_get(data->ff);
	spin_lock(&fc->lock);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

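/*
 * Check whether a write request for this page is already in flight.
 * If so, either update the payload of a still-pending single-page
 * request in place, or chain new_req as a secondary request behind the
 * primary one; in either case the caller must not send new_req itself.
 */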
1733static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1734 struct page *page)
1735{
1736 struct fuse_conn *fc = get_fuse_conn(new_req->inode);
1737 struct fuse_inode *fi = get_fuse_inode(new_req->inode);
1738 struct fuse_req *tmp;
1739 struct fuse_req *old_req;
1740 bool found = false;
1741 pgoff_t curr_index;
1742
1743 BUG_ON(new_req->num_pages != 0);
1744
1745 spin_lock(&fc->lock);
1746 list_del(&new_req->writepages_entry);
8b284dc4
MS
1747 list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
1748 BUG_ON(old_req->inode != new_req->inode);
09cbfeaf 1749 curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
8b284dc4
MS
1750 if (curr_index <= page->index &&
1751 page->index < curr_index + old_req->num_pages) {
1752 found = true;
1753 break;
1754 }
1755 }
f6011081
MP
1756 if (!found) {
1757 list_add(&new_req->writepages_entry, &fi->writepages);
8b284dc4 1758 goto out_unlock;
f6011081 1759 }
8b284dc4 1760
f6011081 1761 new_req->num_pages = 1;
8b284dc4
MS
1762 for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
1763 BUG_ON(tmp->inode != new_req->inode);
09cbfeaf 1764 curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
8b284dc4
MS
1765 if (tmp->num_pages == 1 &&
1766 curr_index == page->index) {
1767 old_req = tmp;
1768 }
1769 }
1770
33e14b4d 1771 if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
de1414a6 1772 struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
41b6e41f 1773
8b284dc4
MS
1774 copy_highpage(old_req->pages[0], page);
1775 spin_unlock(&fc->lock);
1776
93f78d88 1777 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
11fb9989 1778 dec_node_page_state(page, NR_WRITEBACK_TEMP);
93f78d88 1779 wb_writeout_inc(&bdi->wb);
8b284dc4
MS
1780 fuse_writepage_free(fc, new_req);
1781 fuse_request_free(new_req);
1782 goto out;
1783 } else {
1784 new_req->misc.write.next = old_req->misc.write.next;
1785 old_req->misc.write.next = new_req;
1786 }
1787out_unlock:
1788 spin_unlock(&fc->lock);
1789out:
1790 return found;
1791}
1792
static int fuse_writepages_fill(struct page *page,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	bool is_writeback;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
		if (!data->ff)
			goto out_unlock;
	}

	/*
	 * Being under writeback is unlikely but possible.  For example, a
	 * direct read into an mmapped fuse file will dirty the page twice:
	 * once when the pages are faulted in with get_user_pages(), and
	 * again after the read has completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

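	/*
	 * Send off the current request first if the new page cannot be
	 * appended to it: the page is already under writeback, the
	 * request is full, adding the page would exceed max_write, or
	 * the page is not contiguous with the previous one.
	 */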
	if (req && req->num_pages &&
	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
		data->req = NULL;
	}
	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment req->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->req == NULL) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		err = -ENOMEM;
		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
		if (!req) {
			__free_page(tmp_page);
			goto out_unlock;
		}

		fuse_write_fill(req, data->ff, page_offset(page), 0);
		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
		req->misc.write.next = NULL;
		req->in.argpages = 1;
		__set_bit(FR_BACKGROUND, &req->flags);
		req->num_pages = 0;
		req->end = fuse_writepage_end;
		req->inode = inode;

		spin_lock(&fc->lock);
		list_add(&req->writepages_entry, &fi->writepages);
		spin_unlock(&fc->lock);

		data->req = req;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	req->pages[req->num_pages] = tmp_page;
	req->page_descs[req->num_pages].offset = 0;
	req->page_descs[req->num_pages].length = PAGE_SIZE;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (is_writeback && fuse_writepage_in_flight(req, page)) {
		end_page_writeback(page);
		data->req = NULL;
		goto out_unlock;
	}
	data->orig_pages[req->num_pages] = page;

	/*
	 * Protected by fc->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fc->lock);
	req->num_pages++;
	spin_unlock(&fc->lock);

out_unlock:
	unlock_page(page);

	return err;
}

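/*
 * Write back the inode's dirty pages: write_cache_pages() feeds them
 * to fuse_writepages_fill(), which batches contiguous pages into a
 * single FUSE write request.
 */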
static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.inode = inode;
	data.req = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.req) {
		/* Ignore errors if we can write at least one page */
		BUG_ON(!data.req->num_pages);
		fuse_writepages_send(&data);
		err = 0;
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}

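/*
 * ->write_begin/->write_end are only wired up when writeback_cache is
 * enabled (note the WARN_ON below): writes then go through the page
 * cache and are written back later instead of being sent to the
 * server synchronously.
 */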
/*
 * It would be worthwhile to make sure that space is reserved on disk
 * for the write, but how to implement that without hurting performance
 * needs more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file,
	 * in which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = (pos + copied) & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	fuse_write_update_size(inode, pos + copied);
	set_page_dirty(page);

unlock:
	unlock_page(page);
	put_page(page);

	return copied;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise an unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
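	/* Bail out if the page was truncated while we waited for the lock. */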
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

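	/*
	 * Drop stale cached pages so the private mapping sees current
	 * data when it faults through the page cache.
	 */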
	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->in.h.opcode = opcode;
	args->in.h.nodeid = get_node_id(inode);
	args->in.numargs = 1;
	args->in.args[0].size = sizeof(*inarg);
	args->in.args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(fc, &outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fc, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

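/*
 * Map a file block to a device block via FUSE_BMAP.  Only meaningful
 * for block-device-backed (fuseblk) mounts; returns 0 when the server
 * does not implement the operation.
 */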
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.in.h.opcode = FUSE_BMAP;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

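/*
 * Implement SEEK_HOLE/SEEK_DATA by forwarding the request to the
 * server as FUSE_LSEEK; fall back to refreshing the attributes and
 * using generic_file_llseek() on servers that lack it.
 */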
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fc->no_lseek)
		goto fallback;

	args.in.h.opcode = FUSE_LSEEK;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err) {
		if (err == -ENOSYS) {
			fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing the FUSE server to retry the ioctl with
 * the necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to the FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on this invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry the ioctl with both struct a and
 * the buffer.
 *
 * This time, the FUSE server has everything it needs and completes
 * the ioctl without FUSE_IOCTL_RETRY, which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred, c;
	int err, i;
	struct iov_iter ii;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by the client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;
	fuse_page_descs_length_init(req, 0, req->num_pages);

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = -EFAULT;
		iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
		for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
			c = copy_page_from_iter(pages[i], 0, PAGE_SIZE, &ii);
			if (c != PAGE_SIZE && iov_iter_count(&ii))
				goto out;
		}
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are within bounds; the separate checks
		 * protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = -EFAULT;
	iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
	for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
		c = copy_page_to_iter(pages[i], 0, PAGE_SIZE, &ii);
		if (c != PAGE_SIZE && iov_iter_count(&ii))
			goto out;
	}
	err = 0;
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to the RB tree
 * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *uninitialized_var(parent);

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = (__u32)poll_requested_events(wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	args.in.h.opcode = FUSE_POLL;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}

static inline loff_t fuse_round_up(loff_t off)
{
	return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}

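/*
 * Entry point for O_DIRECT I/O on files using the regular (cached)
 * file operations.  When the server supports async_dio, requests are
 * submitted in parallel and the kiocb completes asynchronously unless
 * the write extends the file.
 */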
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	bool async_dio = ff->fc->async_dio;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
		return 0;

	/* optimization for short read */
	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
		if (offset >= i_size)
			return 0;
		iov_iter_truncate(iter, fuse_round_up(i_size - offset));
		count = iov_iter_count(iter);
	}

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In that case the aio will behave exactly like sync io.
	 */
	if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr(inode);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}

	if (io->async) {
		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!io->blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			  (mode & FALLOC_FL_PUNCH_HOLE);

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	if (lock_inode) {
		inode_lock(inode);
		if (mode & FALLOC_FL_PUNCH_HOLE) {
			loff_t endbyte = offset + length - 1;
			err = filemap_write_and_wait_range(inode->i_mapping,
							   offset, endbyte);
			if (err)
				goto out;

			fuse_sync_writes(inode);
		}
	}

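	/*
	 * The file size may change while the request is outstanding;
	 * flag it as unstable so attribute refreshes do not clobber it.
	 */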
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.in.h.opcode = FUSE_FALLOCATE;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		bool changed = fuse_write_update_size(inode, offset + length);

		if (changed && fc->writeback_cache)
			file_update_time(file);
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (lock_inode)
		inode_unlock(inode);

	return err;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
};

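/*
 * Variant installed by fuse_finish_open() when the server replies to
 * open with FOPEN_DIRECT_IO set: reads and writes bypass the page
 * cache entirely.
 */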
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_direct_read_iter,
	.write_iter	= fuse_direct_write_iter,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}