/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/aio.h>
#include <linux/falloc.h>

static const struct file_operations fuse_direct_io_file_operations;

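/*
 * Send FUSE_OPEN or FUSE_OPENDIR for the given node and collect the
 * server's reply (file handle and open flags) in @outargp.
 */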
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}

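/*
 * Drop a reference to a fuse_file.  The last put sends the prepared
 * RELEASE request: synchronously if @sync is set, in the background
 * otherwise.  If the server implements no 'open', the request is
 * simply discarded.
 */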
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (ff->fc->no_open) {
			/*
			 * Drop the release request when client does not
			 * implement 'open'
			 */
			req->background = 0;
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else if (sync) {
			req->background = 0;
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			req->background = 1;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

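/*
 * Open a file or directory.  If FUSE_OPEN returns ENOSYS, remember that
 * the server does not implement open and fall back to an opened state
 * with a zero file handle and FOPEN_KEEP_CACHE (no-open mode).
 */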
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
	if (!fc->no_open || isdir) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;

		} else if (err != -ENOSYS || isdir) {
			fuse_file_free(ff);
			return err;
		} else {
			fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fc->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fc->lock);
}

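/*
 * Apply the open flags returned by the server: switch to the direct-I/O
 * file operations for FOPEN_DIRECT_IO, drop cached pages unless
 * FOPEN_KEEP_CACHE is set, and mark the file nonseekable for
 * FOPEN_NONSEEKABLE.  With atomic O_TRUNC the truncation happens here,
 * so no separate SETATTR is sent.
 */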
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

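/*
 * Fill in the RELEASE request and unhash the file from the write and
 * poll lists.  The request itself is sent later, see fuse_file_put().
 */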
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		filemap_write_and_wait(file->f_mapping);

	if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state))
		fuse_flush_mtime(file, true);

	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	ff->reserved_req->background = 0;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (idx_from < curr_index + req->num_pages &&
		    curr_index <= idx_to) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

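/*
 * FLUSH is sent on every close() of a file descriptor.  Unlike RELEASE
 * it carries the POSIX lock owner, so the server can release locks held
 * by that owner.  An ENOSYS reply disables FLUSH for this connection.
 */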
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = filemap_write_and_wait(file->f_mapping);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	fuse_sync_writes(inode);
	mutex_unlock(&inode->i_mutex);

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state)) {
		int err = fuse_flush_mtime(file, false);
		if (err)
			goto out;
	}

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

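/*
 * Fill in a READ request for @count bytes at @pos.  req->out.argvar
 * allows the reply to be shorter than requested.
 */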
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	spin_unlock(&io->lock);

	if (!left) {
		long res;

		if (io->err)
			res = io->err;
		else if (io->bytes >= 0 && io->write)
			res = -EIO;
		else {
			res = io->bytes < 0 ? io->size : io->bytes;

			if (!is_sync_kiocb(io->iocb)) {
				struct inode *inode = file_inode(io->iocb->ki_filp);
				struct fuse_conn *fc = get_fuse_conn(inode);
				struct fuse_inode *fi = get_fuse_inode(inode);

				spin_lock(&fc->lock);
				fi->attr_version = ++fc->attr_version;
				spin_unlock(&fc->lock);
			}
		}

		aio_complete(io->iocb, res, 0);
		kfree(io);
	}
}

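/*
 * Completion of one request of an async I/O: compute 'pos' as described
 * above (-1 for a full-length transfer, otherwise the end of the
 * transferred data relative to the start of the whole I/O).
 */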
static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, !io->write);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}

static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
		size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	__fuse_get_request(req);
	fuse_request_send_background(fc, req);

	return num_bytes;
}

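/*
 * Send a READ request for [pos, pos + count).  The async path queues the
 * request and optimistically returns @count; the sync path returns the
 * number of bytes actually read.
 */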
static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

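/*
 * Handle a short READ reply.  With writeback caching the gap is a hole,
 * so the remainder of the pages is zeroed; without it a short read
 * means EOF, so the locally cached i_size is shrunk.
 */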
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
			    u64 attr_ver)
{
	size_t num_read = req->out.args[0].size;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole are in page cache,
		 * but have not reached the client fs yet. So, the hole is not
		 * present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_CACHE_SHIFT;
		size_t off = num_read & (PAGE_CACHE_SIZE - 1);

		for (i = start_idx; i < req->num_pages; i++) {
			zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct fuse_io_priv io = { .async = 0, .file = file };
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, &io, pos, count, NULL);
	err = req->out.h.error;

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_short_read(req, inode, attr_ver);

		SetPageUptodate(page);
	}

	fuse_put_request(fc, req);

	return err;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count)
			fuse_short_read(req, inode, req->misc.read.attr_ver);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

980 | ||
36cf66ed | 981 | static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io, |
2106cb18 | 982 | loff_t pos, size_t count, fl_owner_t owner) |
b25e82e5 | 983 | { |
36cf66ed | 984 | struct file *file = io->file; |
2106cb18 MS |
985 | struct fuse_file *ff = file->private_data; |
986 | struct fuse_conn *fc = ff->fc; | |
2d698b07 MS |
987 | struct fuse_write_in *inarg = &req->misc.write.in; |
988 | ||
2106cb18 | 989 | fuse_write_fill(req, ff, pos, count); |
2d698b07 | 990 | inarg->flags = file->f_flags; |
f3332114 | 991 | if (owner != NULL) { |
f3332114 MS |
992 | inarg->write_flags |= FUSE_WRITE_LOCKOWNER; |
993 | inarg->lock_owner = fuse_lock_owner_id(fc, owner); | |
994 | } | |
36cf66ed MP |
995 | |
996 | if (io->async) | |
997 | return fuse_async_req_send(fc, req, count, io); | |
998 | ||
b93f858a | 999 | fuse_request_send(fc, req); |
b25e82e5 | 1000 | return req->misc.write.out.size; |
b6aeaded MS |
1001 | } |
1002 | ||
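/*
 * Extend the cached i_size after a successful write.  Returns true if
 * the size was actually updated.
 */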
bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fc->lock);

	return ret;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;
	struct fuse_io_priv io = { .async = 0, .file = file };

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, &io, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

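/*
 * Copy as much of the iovec as fits into one request, using freshly
 * grabbed page-cache pages.  Stops at the request's page limit, at
 * fc->max_write, or after a single page if the server did not
 * negotiate big_writes.
 */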
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
			       struct address_space *mapping,
			       struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		mark_page_accessed(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

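/*
 * Number of page-cache pages spanned by a write of @len bytes at @pos,
 * capped at FUSE_MAX_PAGES_PER_REQ.  E.g. with 4K pages, pos = 1000 and
 * len = 5000 end at byte 5999 and touch pages 0 and 1:
 * (5999 >> 12) - (1000 >> 12) + 1 = 2.
 */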
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
		     (pos >> PAGE_CACHE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}

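/*
 * Write-through path for cached writes: fill a request with page-cache
 * pages, send it synchronously and repeat until the iovec is drained.
 * A short write is treated as -EIO and ends the loop.
 */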
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

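/*
 * Without writeback caching, O_DIRECT writes go through
 * generic_file_direct_write() with a fuse_perform_write() fallback for
 * any remainder; everything else goes through fuse_perform_write()
 * directly.
 */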
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	size_t ocount = 0;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, NULL, file, NULL);
		if (err)
			return err;

		return generic_file_aio_write(iocb, iov, nr_segs, pos);
	}

	WARN_ON(iocb->ki_pos != pos);

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (file->f_flags & O_DIRECT) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
						    count, ocount);
		if (written < 0 || written == count)
			goto out;

		pos += written;
		count -= written;

		iov_iter_init(&i, iov, nr_segs, count, written);
		written_buffered = fuse_perform_write(file, mapping, &i, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		iov_iter_init(&i, iov, nr_segs, count, 0);
		written = fuse_perform_write(file, mapping, &i, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
	}
out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
		unsigned index, unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

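/*
 * Pin the pages of the user buffer into the request with
 * get_user_pages_fast() and fill in the per-page offset/length
 * descriptors.  Kernel callers (KERNEL_DS) get their buffer used
 * directly, without pinning.
 */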
static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		unsigned long user_addr = fuse_get_user_addr(ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
		int ret;

		unsigned n = req->max_pages - req->num_pages;
		frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);

		npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		npages = clamp(npages, 1U, n);

		ret = get_user_pages_fast(user_addr, npages, !write,
					  &req->pages[req->num_pages]);
		if (ret < 0)
			return ret;

		npages = ret;
		frag_size = min_t(size_t, frag_size,
				  (npages << PAGE_SHIFT) - offset);
		iov_iter_advance(ii, frag_size);

		req->page_descs[req->num_pages].offset = offset;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(npages << PAGE_SHIFT) - offset - frag_size;

		nbytes += frag_size;
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return 0;
}

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	struct iov_iter ii = *ii_p;
	int npages = 0;

	while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
		unsigned long user_addr = fuse_get_user_addr(&ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = iov_iter_single_seg_count(&ii);

		npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iov_iter_advance(&ii, frag_size);
	}

	return min(npages, FUSE_MAX_PAGES_PER_REQ);
}

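/*
 * Core of direct I/O: split the iovec into requests of at most max_read
 * or max_write bytes and issue them one by one (or queue them all in the
 * background when io->async).  FUSE_DIO_WRITE selects the write path;
 * FUSE_DIO_CUSE skips the page cache synchronization, which does not
 * apply to CUSE character devices.
 */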
ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
		       unsigned long nr_segs, size_t count, loff_t *ppos,
		       int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->file;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	ssize_t res = 0;
	struct fuse_req *req;
	struct iov_iter ii;

	iov_iter_init(&ii, iov, nr_segs, count, 0);

	if (io->async)
		req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
	else
		req = fuse_get_req(fc, fuse_iter_npages(&ii));
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			mutex_lock(&inode->i_mutex);
		fuse_sync_writes(inode);
		if (!write)
			mutex_unlock(&inode->i_mutex);
	}

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, &ii, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, io, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, io, pos, nbytes, owner);

		if (!io->async)
			fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			if (io->async)
				req = fuse_get_req_for_background(fc,
					fuse_iter_npages(&ii));
			else
				req = fuse_get_req(fc, fuse_iter_npages(&ii));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  const struct iovec *iov,
				  unsigned long nr_segs, loff_t *ppos,
				  size_t count)
{
	ssize_t res;
	struct file *file = io->file;
	struct inode *inode = file_inode(file);

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct fuse_io_priv io = { .async = 0, .file = file };
	struct iovec iov = { .iov_base = buf, .iov_len = count };
	return __fuse_direct_read(&io, &iov, 1, ppos, count);
}

static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
				   const struct iovec *iov,
				   unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = io->file;
	struct inode *inode = file_inode(file);
	size_t count = iov_length(iov, nr_segs);
	ssize_t res;

	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(io, iov, nr_segs, count, ppos,
				     FUSE_DIO_WRITE);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
	struct inode *inode = file_inode(file);
	ssize_t res;
	struct fuse_io_priv io = { .async = 0, .file = file };

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = __fuse_direct_write(&io, &iov, 1, ppos);
	if (res > 0)
		fuse_write_update_size(inode, *ppos);
	mutex_unlock(&inode->i_mutex);

	return res;
}

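/*
 * The pages attached to a writepage request are temporary copies,
 * accounted as NR_WRITEBACK_TEMP; they are freed here once the WRITE
 * request is done with them.
 */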
3be5a52b | 1531 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) |
b6aeaded | 1532 | { |
385b1268 PE |
1533 | int i; |
1534 | ||
1535 | for (i = 0; i < req->num_pages; i++) | |
1536 | __free_page(req->pages[i]); | |
8b284dc4 MS |
1537 | |
1538 | if (req->ff) | |
1539 | fuse_file_put(req->ff, false); | |
3be5a52b MS |
1540 | } |
1541 | ||
1542 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | |
1543 | { | |
1544 | struct inode *inode = req->inode; | |
1545 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1546 | struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; | |
385b1268 | 1547 | int i; |
3be5a52b MS |
1548 | |
1549 | list_del(&req->writepages_entry); | |
385b1268 PE |
1550 | for (i = 0; i < req->num_pages; i++) { |
1551 | dec_bdi_stat(bdi, BDI_WRITEBACK); | |
1552 | dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP); | |
1553 | bdi_writeout_inc(bdi); | |
1554 | } | |
3be5a52b MS |
1555 | wake_up(&fi->page_waitq); |
1556 | } | |
1557 | ||
1558 | /* Called under fc->lock, may release and reacquire it */ | |
6eaf4782 MP |
1559 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req, |
1560 | loff_t size) | |
b9ca67b2 MS |
1561 | __releases(fc->lock) |
1562 | __acquires(fc->lock) | |
3be5a52b MS |
1563 | { |
1564 | struct fuse_inode *fi = get_fuse_inode(req->inode); | |
3be5a52b | 1565 | struct fuse_write_in *inarg = &req->misc.write.in; |
385b1268 | 1566 | __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; |
3be5a52b MS |
1567 | |
1568 | if (!fc->connected) | |
1569 | goto out_free; | |
1570 | ||
385b1268 PE |
1571 | if (inarg->offset + data_size <= size) { |
1572 | inarg->size = data_size; | |
3be5a52b | 1573 | } else if (inarg->offset < size) { |
385b1268 | 1574 | inarg->size = size - inarg->offset; |
3be5a52b MS |
1575 | } else { |
1576 | /* Got truncated off completely */ | |
1577 | goto out_free; | |
b6aeaded | 1578 | } |
3be5a52b MS |
1579 | |
1580 | req->in.args[1].size = inarg->size; | |
1581 | fi->writectr++; | |
b93f858a | 1582 | fuse_request_send_background_locked(fc, req); |
3be5a52b MS |
1583 | return; |
1584 | ||
1585 | out_free: | |
1586 | fuse_writepage_finish(fc, req); | |
1587 | spin_unlock(&fc->lock); | |
1588 | fuse_writepage_free(fc, req); | |
e9bb09dd | 1589 | fuse_put_request(fc, req); |
3be5a52b | 1590 | spin_lock(&fc->lock); |
b6aeaded MS |
1591 | } |
1592 | ||
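/*
 * [Editor's illustration, not part of fs/fuse/file.c] The cropping above
 * clamps a queued write to the current file size: a request entirely below
 * i_size goes out whole, one straddling i_size is shortened, and one wholly
 * beyond it is dropped. A standalone sketch of the same arithmetic:
 */
#if 0 /* illustrative sketch of the crop rule used by fuse_send_writepage() */
#include <stdint.h>

/* Returns the number of bytes to send, or 0 if truncated off completely. */
static uint64_t crop_write(uint64_t offset, uint64_t data_size, uint64_t size)
{
	if (offset + data_size <= size)
		return data_size;	/* fully inside the file */
	if (offset < size)
		return size - offset;	/* straddles i_size: shorten */
	return 0;			/* wholly beyond i_size: drop */
}
#endif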
3be5a52b MS |
1593 | /* |
1594 | * If fi->writectr is positive (no truncate or fsync going on) send | |
1595 | * all queued writepage requests. | |
1596 | * | |
1597 | * Called with fc->lock held | |
1598 | */ | |
1599 | void fuse_flush_writepages(struct inode *inode) | |
b9ca67b2 MS |
1600 | __releases(fc->lock) |
1601 | __acquires(fc->lock) | |
b6aeaded | 1602 | { |
3be5a52b MS |
1603 | struct fuse_conn *fc = get_fuse_conn(inode); |
1604 | struct fuse_inode *fi = get_fuse_inode(inode); | |
6eaf4782 | 1605 | size_t crop = i_size_read(inode); |
3be5a52b MS |
1606 | struct fuse_req *req; |
1607 | ||
1608 | while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { | |
1609 | req = list_entry(fi->queued_writes.next, struct fuse_req, list); | |
1610 | list_del_init(&req->list); | |
6eaf4782 | 1611 | fuse_send_writepage(fc, req, crop); |
3be5a52b MS |
1612 | } |
1613 | } | |
1614 | ||
1615 | static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req) | |
1616 | { | |
1617 | struct inode *inode = req->inode; | |
1618 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1619 | ||
1620 | mapping_set_error(inode->i_mapping, req->out.h.error); | |
1621 | spin_lock(&fc->lock); | |
8b284dc4 | 1622 | while (req->misc.write.next) { |
6eaf4782 MP |
1623 | struct fuse_conn *fc = get_fuse_conn(inode); |
1624 | struct fuse_write_in *inarg = &req->misc.write.in; | |
8b284dc4 MS |
1625 | struct fuse_req *next = req->misc.write.next; |
1626 | req->misc.write.next = next->misc.write.next; | |
1627 | next->misc.write.next = NULL; | |
ce128de6 | 1628 | next->ff = fuse_file_get(req->ff); |
8b284dc4 | 1629 | list_add(&next->writepages_entry, &fi->writepages); |
6eaf4782 MP |
1630 | |
1631 | /* | |
1632 | * Skip fuse_flush_writepages() to make it easy to crop requests | |
1633 | * based on primary request size. | |
1634 | * | |
1635 | * 1st case (trivial): there are no concurrent activities using | |
1636 | * fuse_set/release_nowrite. Then we're on the safe side because | |
1637 | * fuse_flush_writepages() would call fuse_send_writepage() | |
1638 | * anyway. | |
1639 | * | |
1640 | * 2nd case: someone called fuse_set_nowrite and it is waiting | |
1641 | * now for completion of all in-flight requests. This happens | |
1642 | * rarely and no more than once per page, so this should be | |
1643 | * okay. | |
1644 | * | |
1645 | * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle | |
1646 | * of fuse_set_nowrite..fuse_release_nowrite section. The fact | |
1647 | * that fuse_set_nowrite returned implies that all in-flight | |
1648 | * requests were completed along with all of their secondary | |
1649 | * requests. Further primary requests are blocked by negative | |
1650 | * writectr. Hence there cannot be any in-flight requests and | |
1651 | * no invocations of fuse_writepage_end() while we're in | |
1652 | * fuse_set_nowrite..fuse_release_nowrite section. | |
1653 | */ | |
1654 | fuse_send_writepage(fc, next, inarg->offset + inarg->size); | |
8b284dc4 | 1655 | } |
3be5a52b MS |
1656 | fi->writectr--; |
1657 | fuse_writepage_finish(fc, req); | |
1658 | spin_unlock(&fc->lock); | |
1659 | fuse_writepage_free(fc, req); | |
1660 | } | |
1661 | ||
26d614df PE |
1662 | static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc, |
1663 | struct fuse_inode *fi) | |
adcadfa8 | 1664 | { |
72523425 | 1665 | struct fuse_file *ff = NULL; |
adcadfa8 PE |
1666 | |
1667 | spin_lock(&fc->lock); | |
72523425 MS |
1668 | if (!WARN_ON(list_empty(&fi->write_files))) { |
1669 | ff = list_entry(fi->write_files.next, struct fuse_file, | |
1670 | write_entry); | |
1671 | fuse_file_get(ff); | |
1672 | } | |
adcadfa8 PE |
1673 | spin_unlock(&fc->lock); |
1674 | ||
1675 | return ff; | |
1676 | } | |
1677 | ||
3be5a52b MS |
1678 | static int fuse_writepage_locked(struct page *page) |
1679 | { | |
1680 | struct address_space *mapping = page->mapping; | |
1681 | struct inode *inode = mapping->host; | |
1682 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1683 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1684 | struct fuse_req *req; | |
3be5a52b | 1685 | struct page *tmp_page; |
72523425 | 1686 | int error = -ENOMEM; |
3be5a52b MS |
1687 | |
1688 | set_page_writeback(page); | |
1689 | ||
4250c066 | 1690 | req = fuse_request_alloc_nofs(1); |
3be5a52b MS |
1691 | if (!req) |
1692 | goto err; | |
1693 | ||
8b41e671 | 1694 | req->background = 1; /* writeback always goes to bg_queue */ |
3be5a52b MS |
1695 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); |
1696 | if (!tmp_page) | |
1697 | goto err_free; | |
1698 | ||
72523425 | 1699 | error = -EIO; |
26d614df | 1700 | req->ff = fuse_write_file_get(fc, fi); |
72523425 MS |
1701 | if (!req->ff) |
1702 | goto err_free; | |
1703 | ||
adcadfa8 | 1704 | fuse_write_fill(req, req->ff, page_offset(page), 0); |
3be5a52b MS |
1705 | |
1706 | copy_highpage(tmp_page, page); | |
2d698b07 | 1707 | req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; |
8b284dc4 | 1708 | req->misc.write.next = NULL; |
f4975c67 | 1709 | req->in.argpages = 1; |
3be5a52b MS |
1710 | req->num_pages = 1; |
1711 | req->pages[0] = tmp_page; | |
b2430d75 | 1712 | req->page_descs[0].offset = 0; |
85f40aec | 1713 | req->page_descs[0].length = PAGE_SIZE; |
3be5a52b MS |
1714 | req->end = fuse_writepage_end; |
1715 | req->inode = inode; | |
1716 | ||
1717 | inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); | |
1718 | inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); | |
3be5a52b MS |
1719 | |
1720 | spin_lock(&fc->lock); | |
1721 | list_add(&req->writepages_entry, &fi->writepages); | |
1722 | list_add_tail(&req->list, &fi->queued_writes); | |
1723 | fuse_flush_writepages(inode); | |
1724 | spin_unlock(&fc->lock); | |
1725 | ||
4a4ac4eb MP |
1726 | end_page_writeback(page); |
1727 | ||
3be5a52b MS |
1728 | return 0; |
1729 | ||
1730 | err_free: | |
1731 | fuse_request_free(req); | |
1732 | err: | |
1733 | end_page_writeback(page); | |
72523425 | 1734 | return error; |
3be5a52b MS |
1735 | } |
1736 | ||
1737 | static int fuse_writepage(struct page *page, struct writeback_control *wbc) | |
1738 | { | |
1739 | int err; | |
1740 | ||
ff17be08 MS |
1741 | if (fuse_page_is_writeback(page->mapping->host, page->index)) { |
1742 | /* | |
1743 | * ->writepages() should be called for sync() and friends. We | |
1744 | * should only get here on direct reclaim and then we are | |
1745 | * allowed to skip a page which is already in flight | |
1746 | */ | |
1747 | WARN_ON(wbc->sync_mode == WB_SYNC_ALL); | |
1748 | ||
1749 | redirty_page_for_writepage(wbc, page); | |
1750 | return 0; | |
1751 | } | |
1752 | ||
3be5a52b MS |
1753 | err = fuse_writepage_locked(page); |
1754 | unlock_page(page); | |
1755 | ||
1756 | return err; | |
1757 | } | |
1758 | ||
26d614df PE |
1759 | struct fuse_fill_wb_data { |
1760 | struct fuse_req *req; | |
1761 | struct fuse_file *ff; | |
1762 | struct inode *inode; | |
2d033eaa | 1763 | struct page **orig_pages; |
26d614df PE |
1764 | }; |
1765 | ||
1766 | static void fuse_writepages_send(struct fuse_fill_wb_data *data) | |
1767 | { | |
1768 | struct fuse_req *req = data->req; | |
1769 | struct inode *inode = data->inode; | |
1770 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1771 | struct fuse_inode *fi = get_fuse_inode(inode); | |
2d033eaa MP |
1772 | int num_pages = req->num_pages; |
1773 | int i; | |
26d614df PE |
1774 | |
1775 | req->ff = fuse_file_get(data->ff); | |
1776 | spin_lock(&fc->lock); | |
1777 | list_add_tail(&req->list, &fi->queued_writes); | |
1778 | fuse_flush_writepages(inode); | |
1779 | spin_unlock(&fc->lock); | |
2d033eaa MP |
1780 | |
1781 | for (i = 0; i < num_pages; i++) | |
1782 | end_page_writeback(data->orig_pages[i]); | |
26d614df PE |
1783 | } |
1784 | ||
8b284dc4 MS |
1785 | static bool fuse_writepage_in_flight(struct fuse_req *new_req, |
1786 | struct page *page) | |
1787 | { | |
1788 | struct fuse_conn *fc = get_fuse_conn(new_req->inode); | |
1789 | struct fuse_inode *fi = get_fuse_inode(new_req->inode); | |
1790 | struct fuse_req *tmp; | |
1791 | struct fuse_req *old_req; | |
1792 | bool found = false; | |
1793 | pgoff_t curr_index; | |
1794 | ||
1795 | BUG_ON(new_req->num_pages != 0); | |
1796 | ||
1797 | spin_lock(&fc->lock); | |
1798 | list_del(&new_req->writepages_entry); | |
8b284dc4 MS |
1799 | list_for_each_entry(old_req, &fi->writepages, writepages_entry) { |
1800 | BUG_ON(old_req->inode != new_req->inode); | |
1801 | curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; | |
1802 | if (curr_index <= page->index && | |
1803 | page->index < curr_index + old_req->num_pages) { | |
1804 | found = true; | |
1805 | break; | |
1806 | } | |
1807 | } | |
f6011081 MP |
1808 | if (!found) { |
1809 | list_add(&new_req->writepages_entry, &fi->writepages); | |
8b284dc4 | 1810 | goto out_unlock; |
f6011081 | 1811 | } |
8b284dc4 | 1812 | |
f6011081 | 1813 | new_req->num_pages = 1; |
8b284dc4 MS |
1814 | for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { |
1815 | BUG_ON(tmp->inode != new_req->inode); | |
1816 | curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; | |
1817 | if (tmp->num_pages == 1 && | |
1818 | curr_index == page->index) { | |
1819 | old_req = tmp; | |
1820 | } | |
1821 | } | |
1822 | ||
1823 | if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT || | |
1824 | old_req->state == FUSE_REQ_PENDING)) { | |
41b6e41f MP |
1825 | struct backing_dev_info *bdi = page->mapping->backing_dev_info; |
1826 | ||
8b284dc4 MS |
1827 | copy_highpage(old_req->pages[0], page); |
1828 | spin_unlock(&fc->lock); | |
1829 | ||
41b6e41f | 1830 | dec_bdi_stat(bdi, BDI_WRITEBACK); |
8b284dc4 | 1831 | dec_zone_page_state(page, NR_WRITEBACK_TEMP); |
41b6e41f | 1832 | bdi_writeout_inc(bdi); |
8b284dc4 MS |
1833 | fuse_writepage_free(fc, new_req); |
1834 | fuse_request_free(new_req); | |
1835 | goto out; | |
1836 | } else { | |
1837 | new_req->misc.write.next = old_req->misc.write.next; | |
1838 | old_req->misc.write.next = new_req; | |
1839 | } | |
1840 | out_unlock: | |
1841 | spin_unlock(&fc->lock); | |
1842 | out: | |
1843 | return found; | |
1844 | } | |
1845 | ||
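/*
 * [Editor's illustration, not part of fs/fuse/file.c] The scan above decides
 * whether an in-flight request already covers the page with an index-range
 * test. The predicate, written out on its own:
 */
#if 0 /* illustrative sketch of the range test in fuse_writepage_in_flight() */
#include <stdbool.h>
#include <stdint.h>

static bool req_covers_index(uint64_t first_index, unsigned num_pages,
			     uint64_t index)
{
	/* first_index is misc.write.in.offset >> PAGE_CACHE_SHIFT */
	return first_index <= index && index < first_index + num_pages;
}
#endif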
26d614df PE |
1846 | static int fuse_writepages_fill(struct page *page, |
1847 | struct writeback_control *wbc, void *_data) | |
1848 | { | |
1849 | struct fuse_fill_wb_data *data = _data; | |
1850 | struct fuse_req *req = data->req; | |
1851 | struct inode *inode = data->inode; | |
1852 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1853 | struct page *tmp_page; | |
8b284dc4 | 1854 | bool is_writeback; |
26d614df PE |
1855 | int err; |
1856 | ||
1857 | if (!data->ff) { | |
1858 | err = -EIO; | |
1859 | data->ff = fuse_write_file_get(fc, get_fuse_inode(inode)); | |
1860 | if (!data->ff) | |
1861 | goto out_unlock; | |
1862 | } | |
1863 | ||
8b284dc4 MS |
1864 | /* |
1865 | * Being under writeback is unlikely but possible. For example, a direct | |
1866 | * read into an mmapped fuse file will set the page dirty twice; once when | |
1867 | * the pages are faulted with get_user_pages(), and again after the read | |
1868 | * has completed. | |
1869 | */ | |
1870 | is_writeback = fuse_page_is_writeback(inode, page->index); | |
1871 | ||
1872 | if (req && req->num_pages && | |
1873 | (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || | |
1874 | (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || | |
1875 | data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { | |
1876 | fuse_writepages_send(data); | |
1877 | data->req = NULL; | |
26d614df PE |
1878 | } |
1879 | err = -ENOMEM; | |
1880 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); | |
1881 | if (!tmp_page) | |
1882 | goto out_unlock; | |
1883 | ||
1884 | /* | |
1885 | * The page must not be redirtied until the writeout is completed | |
1886 | * (i.e. userspace has sent a reply to the write request). Otherwise | |
1887 | * there could be more than one temporary page instance for each real | |
1888 | * page. | |
1889 | * | |
1890 | * This is ensured by holding the page lock in page_mkwrite() while | |
1891 | * checking fuse_page_is_writeback(). We already hold the page lock | |
1892 | * since clear_page_dirty_for_io() and keep it held until we add the | |
1893 | * request to the fi->writepages list and increment req->num_pages. | |
1894 | * After this fuse_page_is_writeback() will indicate that the page is | |
1895 | * under writeback, so we can release the page lock. | |
1896 | */ | |
1897 | if (data->req == NULL) { | |
1898 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1899 | ||
1900 | err = -ENOMEM; | |
1901 | req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ); | |
1902 | if (!req) { | |
1903 | __free_page(tmp_page); | |
1904 | goto out_unlock; | |
1905 | } | |
1906 | ||
1907 | fuse_write_fill(req, data->ff, page_offset(page), 0); | |
1908 | req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; | |
8b284dc4 | 1909 | req->misc.write.next = NULL; |
26d614df PE |
1910 | req->in.argpages = 1; |
1911 | req->background = 1; | |
1912 | req->num_pages = 0; | |
1913 | req->end = fuse_writepage_end; | |
1914 | req->inode = inode; | |
1915 | ||
1916 | spin_lock(&fc->lock); | |
1917 | list_add(&req->writepages_entry, &fi->writepages); | |
1918 | spin_unlock(&fc->lock); | |
1919 | ||
1920 | data->req = req; | |
1921 | } | |
1922 | set_page_writeback(page); | |
1923 | ||
1924 | copy_highpage(tmp_page, page); | |
1925 | req->pages[req->num_pages] = tmp_page; | |
1926 | req->page_descs[req->num_pages].offset = 0; | |
1927 | req->page_descs[req->num_pages].length = PAGE_SIZE; | |
1928 | ||
1929 | inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK); | |
1930 | inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); | |
8b284dc4 MS |
1931 | |
1932 | err = 0; | |
1933 | if (is_writeback && fuse_writepage_in_flight(req, page)) { | |
1934 | end_page_writeback(page); | |
1935 | data->req = NULL; | |
1936 | goto out_unlock; | |
1937 | } | |
2d033eaa | 1938 | data->orig_pages[req->num_pages] = page; |
26d614df PE |
1939 | |
1940 | /* | |
1941 | * Protected by fc->lock against concurrent access by | |
1942 | * fuse_page_is_writeback(). | |
1943 | */ | |
1944 | spin_lock(&fc->lock); | |
1945 | req->num_pages++; | |
1946 | spin_unlock(&fc->lock); | |
1947 | ||
26d614df PE |
1948 | out_unlock: |
1949 | unlock_page(page); | |
1950 | ||
1951 | return err; | |
1952 | } | |
1953 | ||
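/*
 * [Editor's illustration, not part of fs/fuse/file.c] fuse_writepages_fill()
 * flushes the request it is building when the next page cannot be appended:
 * the page is already under writeback, the request is full, adding a page
 * would exceed max_write, or the page is not contiguous with the previous
 * one. That decision, restated as a standalone predicate:
 */
#if 0 /* illustrative sketch; parameter names mirror the fields used above */
#include <stdbool.h>
#include <stdint.h>

static bool must_flush(bool is_writeback, unsigned num_pages,
		       unsigned max_pages_per_req, uint64_t page_size,
		       uint64_t max_write, uint64_t last_index,
		       uint64_t new_index)
{
	return is_writeback ||
	       num_pages == max_pages_per_req ||
	       (num_pages + 1) * page_size > max_write ||
	       last_index + 1 != new_index;
}
#endif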
1954 | static int fuse_writepages(struct address_space *mapping, | |
1955 | struct writeback_control *wbc) | |
1956 | { | |
1957 | struct inode *inode = mapping->host; | |
1958 | struct fuse_fill_wb_data data; | |
1959 | int err; | |
1960 | ||
1961 | err = -EIO; | |
1962 | if (is_bad_inode(inode)) | |
1963 | goto out; | |
1964 | ||
1965 | data.inode = inode; | |
1966 | data.req = NULL; | |
1967 | data.ff = NULL; | |
1968 | ||
2d033eaa MP |
1969 | err = -ENOMEM; |
1970 | data.orig_pages = kzalloc(sizeof(struct page *) * | |
1971 | FUSE_MAX_PAGES_PER_REQ, | |
1972 | GFP_NOFS); | |
1973 | if (!data.orig_pages) | |
1974 | goto out; | |
1975 | ||
26d614df PE |
1976 | err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); |
1977 | if (data.req) { | |
1978 | /* Ignore errors if we can write at least one page */ | |
1979 | BUG_ON(!data.req->num_pages); | |
1980 | fuse_writepages_send(&data); | |
1981 | err = 0; | |
1982 | } | |
1983 | if (data.ff) | |
1984 | fuse_file_put(data.ff, false); | |
2d033eaa MP |
1985 | |
1986 | kfree(data.orig_pages); | |
26d614df PE |
1987 | out: |
1988 | return err; | |
1989 | } | |
1990 | ||
6b12c1b3 PE |
1991 | /* |
1992 | * It's worth making sure that space is reserved on disk for the write, | |
1993 | * but how to implement that without killing performance needs more thought. | |
1994 | */ | |
1995 | static int fuse_write_begin(struct file *file, struct address_space *mapping, | |
1996 | loff_t pos, unsigned len, unsigned flags, | |
1997 | struct page **pagep, void **fsdata) | |
1998 | { | |
1999 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | |
2000 | struct fuse_conn *fc = get_fuse_conn(file->f_dentry->d_inode); | |
2001 | struct page *page; | |
2002 | loff_t fsize; | |
2003 | int err = -ENOMEM; | |
2004 | ||
2005 | WARN_ON(!fc->writeback_cache); | |
2006 | ||
2007 | page = grab_cache_page_write_begin(mapping, index, flags); | |
2008 | if (!page) | |
2009 | goto error; | |
2010 | ||
2011 | fuse_wait_on_page_writeback(mapping->host, page->index); | |
2012 | ||
2013 | if (PageUptodate(page) || len == PAGE_CACHE_SIZE) | |
2014 | goto success; | |
2015 | /* | |
2016 | * Check if the start of this page comes after the end of the file, in which | |
2017 | * case the readpage can be optimized away. | |
2018 | */ | |
2019 | fsize = i_size_read(mapping->host); | |
2020 | if (fsize <= (pos & PAGE_CACHE_MASK)) { | |
2021 | size_t off = pos & ~PAGE_CACHE_MASK; | |
2022 | if (off) | |
2023 | zero_user_segment(page, 0, off); | |
2024 | goto success; | |
2025 | } | |
2026 | err = fuse_do_readpage(file, page); | |
2027 | if (err) | |
2028 | goto cleanup; | |
2029 | success: | |
2030 | *pagep = page; | |
2031 | return 0; | |
2032 | ||
2033 | cleanup: | |
2034 | unlock_page(page); | |
2035 | page_cache_release(page); | |
2036 | error: | |
2037 | return err; | |
2038 | } | |
2039 | ||
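/*
 * [Editor's illustration, not part of fs/fuse/file.c] fuse_write_begin()
 * works with three derived quantities: the page index (pos >> shift), the
 * byte offset of the page's start (pos & mask) and the offset of pos within
 * the page (pos & ~mask). For pos = 0x1234 with 4 KiB pages: index = 1,
 * page start = 0x1000, in-page offset = 0x234.
 */
#if 0 /* illustrative sketch of the offset arithmetic */
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1ULL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

static void split_pos(uint64_t pos, uint64_t *index, uint64_t *page_start,
		      uint64_t *in_page)
{
	*index = pos >> EX_PAGE_SHIFT;	/* which page */
	*page_start = pos & EX_PAGE_MASK;	/* byte offset of page start */
	*in_page = pos & ~EX_PAGE_MASK;	/* offset within the page */
}
#endif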
2040 | static int fuse_write_end(struct file *file, struct address_space *mapping, | |
2041 | loff_t pos, unsigned len, unsigned copied, | |
2042 | struct page *page, void *fsdata) | |
2043 | { | |
2044 | struct inode *inode = page->mapping->host; | |
2045 | ||
2046 | if (!PageUptodate(page)) { | |
2047 | /* Zero any unwritten bytes at the end of the page */ | |
2048 | size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; | |
2049 | if (endoff) | |
2050 | zero_user_segment(page, endoff, PAGE_CACHE_SIZE); | |
2051 | SetPageUptodate(page); | |
2052 | } | |
2053 | ||
2054 | fuse_write_update_size(inode, pos + copied); | |
2055 | set_page_dirty(page); | |
2056 | unlock_page(page); | |
2057 | page_cache_release(page); | |
2058 | ||
2059 | return copied; | |
2060 | } | |
2061 | ||
3be5a52b MS |
2062 | static int fuse_launder_page(struct page *page) |
2063 | { | |
2064 | int err = 0; | |
2065 | if (clear_page_dirty_for_io(page)) { | |
2066 | struct inode *inode = page->mapping->host; | |
2067 | err = fuse_writepage_locked(page); | |
2068 | if (!err) | |
2069 | fuse_wait_on_page_writeback(inode, page->index); | |
2070 | } | |
2071 | return err; | |
2072 | } | |
2073 | ||
2074 | /* | |
2075 | * Write back dirty pages now, because there may not be any suitable | |
2076 | * open files later | |
2077 | */ | |
2078 | static void fuse_vma_close(struct vm_area_struct *vma) | |
2079 | { | |
2080 | filemap_write_and_wait(vma->vm_file->f_mapping); | |
2081 | } | |
2082 | ||
2083 | /* | |
2084 | * Wait for writeback against this page to complete before allowing it | |
2085 | * to be marked dirty again, and hence written back again, possibly | |
2086 | * before the previous writepage completed. | |
2087 | * | |
2088 | * Block here, instead of in ->writepage(), so that the userspace fs | |
2089 | * can only block processes actually operating on the filesystem. | |
2090 | * | |
2091 | * Otherwise an unprivileged userspace fs would be able to block | |
2092 | * unrelated operations: | |
2093 | * | |
2094 | * - page migration | |
2095 | * - sync(2) | |
2096 | * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER | |
2097 | */ | |
c2ec175c | 2098 | static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
3be5a52b | 2099 | { |
c2ec175c | 2100 | struct page *page = vmf->page; |
cca24370 MS |
2101 | struct inode *inode = file_inode(vma->vm_file); |
2102 | ||
2103 | file_update_time(vma->vm_file); | |
2104 | lock_page(page); | |
2105 | if (page->mapping != inode->i_mapping) { | |
2106 | unlock_page(page); | |
2107 | return VM_FAULT_NOPAGE; | |
2108 | } | |
3be5a52b MS |
2109 | |
2110 | fuse_wait_on_page_writeback(inode, page->index); | |
cca24370 | 2111 | return VM_FAULT_LOCKED; |
3be5a52b MS |
2112 | } |
2113 | ||
f0f37e2f | 2114 | static const struct vm_operations_struct fuse_file_vm_ops = { |
3be5a52b MS |
2115 | .close = fuse_vma_close, |
2116 | .fault = filemap_fault, | |
f1820361 | 2117 | .map_pages = filemap_map_pages, |
3be5a52b | 2118 | .page_mkwrite = fuse_page_mkwrite, |
0b173bc4 | 2119 | .remap_pages = generic_file_remap_pages, |
3be5a52b MS |
2120 | }; |
2121 | ||
2122 | static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) | |
2123 | { | |
650b22b9 PE |
2124 | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) |
2125 | fuse_link_write_file(file); | |
2126 | ||
3be5a52b MS |
2127 | file_accessed(file); |
2128 | vma->vm_ops = &fuse_file_vm_ops; | |
b6aeaded MS |
2129 | return 0; |
2130 | } | |
2131 | ||
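/*
 * [Editor's illustration, not part of fs/fuse/file.c] A shared writable
 * mapping is what makes page_mkwrite() above fire: the first store into a
 * clean page traps into the fault handler before the page may be dirtied.
 * A minimal userspace sketch (hypothetical fd, 4 KiB mapping):
 */
#if 0 /* illustrative userspace code */
#include <sys/mman.h>

static int dirty_first_page(int fd)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return -1;
	p[0] = 'x';		/* write fault -> fuse_page_mkwrite() */
	return munmap(p, 4096);
}
#endif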
fc280c96 MS |
2132 | static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma) |
2133 | { | |
2134 | /* Can't provide the coherency needed for MAP_SHARED */ | |
2135 | if (vma->vm_flags & VM_MAYSHARE) | |
2136 | return -ENODEV; | |
2137 | ||
3121bfe7 MS |
2138 | invalidate_inode_pages2(file->f_mapping); |
2139 | ||
fc280c96 MS |
2140 | return generic_file_mmap(file, vma); |
2141 | } | |
2142 | ||
71421259 MS |
2143 | static int convert_fuse_file_lock(const struct fuse_file_lock *ffl, |
2144 | struct file_lock *fl) | |
2145 | { | |
2146 | switch (ffl->type) { | |
2147 | case F_UNLCK: | |
2148 | break; | |
2149 | ||
2150 | case F_RDLCK: | |
2151 | case F_WRLCK: | |
2152 | if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || | |
2153 | ffl->end < ffl->start) | |
2154 | return -EIO; | |
2155 | ||
2156 | fl->fl_start = ffl->start; | |
2157 | fl->fl_end = ffl->end; | |
2158 | fl->fl_pid = ffl->pid; | |
2159 | break; | |
2160 | ||
2161 | default: | |
2162 | return -EIO; | |
2163 | } | |
2164 | fl->fl_type = ffl->type; | |
2165 | return 0; | |
2166 | } | |
2167 | ||
2168 | static void fuse_lk_fill(struct fuse_req *req, struct file *file, | |
a9ff4f87 MS |
2169 | const struct file_lock *fl, int opcode, pid_t pid, |
2170 | int flock) | |
71421259 | 2171 | { |
6131ffaa | 2172 | struct inode *inode = file_inode(file); |
9c8ef561 | 2173 | struct fuse_conn *fc = get_fuse_conn(inode); |
71421259 MS |
2174 | struct fuse_file *ff = file->private_data; |
2175 | struct fuse_lk_in *arg = &req->misc.lk_in; | |
2176 | ||
2177 | arg->fh = ff->fh; | |
9c8ef561 | 2178 | arg->owner = fuse_lock_owner_id(fc, fl->fl_owner); |
71421259 MS |
2179 | arg->lk.start = fl->fl_start; |
2180 | arg->lk.end = fl->fl_end; | |
2181 | arg->lk.type = fl->fl_type; | |
2182 | arg->lk.pid = pid; | |
a9ff4f87 MS |
2183 | if (flock) |
2184 | arg->lk_flags |= FUSE_LK_FLOCK; | |
71421259 MS |
2185 | req->in.h.opcode = opcode; |
2186 | req->in.h.nodeid = get_node_id(inode); | |
2187 | req->in.numargs = 1; | |
2188 | req->in.args[0].size = sizeof(*arg); | |
2189 | req->in.args[0].value = arg; | |
2190 | } | |
2191 | ||
2192 | static int fuse_getlk(struct file *file, struct file_lock *fl) | |
2193 | { | |
6131ffaa | 2194 | struct inode *inode = file_inode(file); |
71421259 MS |
2195 | struct fuse_conn *fc = get_fuse_conn(inode); |
2196 | struct fuse_req *req; | |
2197 | struct fuse_lk_out outarg; | |
2198 | int err; | |
2199 | ||
b111c8c0 | 2200 | req = fuse_get_req_nopages(fc); |
71421259 MS |
2201 | if (IS_ERR(req)) |
2202 | return PTR_ERR(req); | |
2203 | ||
a9ff4f87 | 2204 | fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0); |
71421259 MS |
2205 | req->out.numargs = 1; |
2206 | req->out.args[0].size = sizeof(outarg); | |
2207 | req->out.args[0].value = &outarg; | |
b93f858a | 2208 | fuse_request_send(fc, req); |
71421259 MS |
2209 | err = req->out.h.error; |
2210 | fuse_put_request(fc, req); | |
2211 | if (!err) | |
2212 | err = convert_fuse_file_lock(&outarg.lk, fl); | |
2213 | ||
2214 | return err; | |
2215 | } | |
2216 | ||
a9ff4f87 | 2217 | static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) |
71421259 | 2218 | { |
6131ffaa | 2219 | struct inode *inode = file_inode(file); |
71421259 MS |
2220 | struct fuse_conn *fc = get_fuse_conn(inode); |
2221 | struct fuse_req *req; | |
2222 | int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; | |
2223 | pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0; | |
2224 | int err; | |
2225 | ||
8fb47a4f | 2226 | if (fl->fl_lmops && fl->fl_lmops->lm_grant) { |
48e90761 MS |
2227 | /* NLM needs asynchronous locks, which we don't support yet */ |
2228 | return -ENOLCK; | |
2229 | } | |
2230 | ||
71421259 MS |
2231 | /* Unlock on close is handled by the flush method */ |
2232 | if (fl->fl_flags & FL_CLOSE) | |
2233 | return 0; | |
2234 | ||
b111c8c0 | 2235 | req = fuse_get_req_nopages(fc); |
71421259 MS |
2236 | if (IS_ERR(req)) |
2237 | return PTR_ERR(req); | |
2238 | ||
a9ff4f87 | 2239 | fuse_lk_fill(req, file, fl, opcode, pid, flock); |
b93f858a | 2240 | fuse_request_send(fc, req); |
71421259 | 2241 | err = req->out.h.error; |
a4d27e75 MS |
2242 | /* locking is restartable */ |
2243 | if (err == -EINTR) | |
2244 | err = -ERESTARTSYS; | |
71421259 MS |
2245 | fuse_put_request(fc, req); |
2246 | return err; | |
2247 | } | |
2248 | ||
2249 | static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) | |
2250 | { | |
6131ffaa | 2251 | struct inode *inode = file_inode(file); |
71421259 MS |
2252 | struct fuse_conn *fc = get_fuse_conn(inode); |
2253 | int err; | |
2254 | ||
48e90761 MS |
2255 | if (cmd == F_CANCELLK) { |
2256 | err = 0; | |
2257 | } else if (cmd == F_GETLK) { | |
71421259 | 2258 | if (fc->no_lock) { |
9d6a8c5c | 2259 | posix_test_lock(file, fl); |
71421259 MS |
2260 | err = 0; |
2261 | } else | |
2262 | err = fuse_getlk(file, fl); | |
2263 | } else { | |
2264 | if (fc->no_lock) | |
48e90761 | 2265 | err = posix_lock_file(file, fl, NULL); |
71421259 | 2266 | else |
a9ff4f87 | 2267 | err = fuse_setlk(file, fl, 0); |
71421259 MS |
2268 | } |
2269 | return err; | |
2270 | } | |
2271 | ||
a9ff4f87 MS |
2272 | static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) |
2273 | { | |
6131ffaa | 2274 | struct inode *inode = file_inode(file); |
a9ff4f87 MS |
2275 | struct fuse_conn *fc = get_fuse_conn(inode); |
2276 | int err; | |
2277 | ||
37fb3a30 | 2278 | if (fc->no_flock) { |
a9ff4f87 MS |
2279 | err = flock_lock_file_wait(file, fl); |
2280 | } else { | |
37fb3a30 MS |
2281 | struct fuse_file *ff = file->private_data; |
2282 | ||
a9ff4f87 MS |
2283 | /* emulate flock with POSIX locks */ |
2284 | fl->fl_owner = (fl_owner_t) file; | |
37fb3a30 | 2285 | ff->flock = true; |
a9ff4f87 MS |
2286 | err = fuse_setlk(file, fl, 1); |
2287 | } | |
2288 | ||
2289 | return err; | |
2290 | } | |
2291 | ||
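/*
 * [Editor's illustration, not part of fs/fuse/file.c] From userspace the two
 * lock paths above are reached through fcntl() POSIX locks (fuse_file_lock)
 * and flock() (fuse_file_flock). A minimal sketch with a hypothetical fd:
 */
#if 0 /* illustrative userspace code */
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

static int lock_examples(int fd)
{
	struct flock fl = {
		.l_type = F_WRLCK,	/* exclusive POSIX lock */
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,		/* 0 = to end of file */
	};

	if (fcntl(fd, F_SETLK, &fl) == -1)	/* -> fuse_file_lock() */
		return -1;
	if (flock(fd, LOCK_EX | LOCK_NB) == -1)	/* -> fuse_file_flock() */
		return -1;
	return 0;
}
#endif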
b2d2272f MS |
2292 | static sector_t fuse_bmap(struct address_space *mapping, sector_t block) |
2293 | { | |
2294 | struct inode *inode = mapping->host; | |
2295 | struct fuse_conn *fc = get_fuse_conn(inode); | |
2296 | struct fuse_req *req; | |
2297 | struct fuse_bmap_in inarg; | |
2298 | struct fuse_bmap_out outarg; | |
2299 | int err; | |
2300 | ||
2301 | if (!inode->i_sb->s_bdev || fc->no_bmap) | |
2302 | return 0; | |
2303 | ||
b111c8c0 | 2304 | req = fuse_get_req_nopages(fc); |
b2d2272f MS |
2305 | if (IS_ERR(req)) |
2306 | return 0; | |
2307 | ||
2308 | memset(&inarg, 0, sizeof(inarg)); | |
2309 | inarg.block = block; | |
2310 | inarg.blocksize = inode->i_sb->s_blocksize; | |
2311 | req->in.h.opcode = FUSE_BMAP; | |
2312 | req->in.h.nodeid = get_node_id(inode); | |
2313 | req->in.numargs = 1; | |
2314 | req->in.args[0].size = sizeof(inarg); | |
2315 | req->in.args[0].value = &inarg; | |
2316 | req->out.numargs = 1; | |
2317 | req->out.args[0].size = sizeof(outarg); | |
2318 | req->out.args[0].value = &outarg; | |
b93f858a | 2319 | fuse_request_send(fc, req); |
b2d2272f MS |
2320 | err = req->out.h.error; |
2321 | fuse_put_request(fc, req); | |
2322 | if (err == -ENOSYS) | |
2323 | fc->no_bmap = 1; | |
2324 | ||
2325 | return err ? 0 : outarg.block; | |
2326 | } | |
2327 | ||
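/*
 * [Editor's illustration, not part of fs/fuse/file.c] fuse_bmap() backs the
 * FIBMAP ioctl, which is only meaningful for block-device-backed mounts.
 * A userspace sketch (FIBMAP requires raw I/O capability):
 */
#if 0 /* illustrative userspace code */
#include <linux/fs.h>		/* FIBMAP */
#include <sys/ioctl.h>

static long logical_to_physical_block(int fd, int logical_block)
{
	int block = logical_block;

	/* On error, or if the fs reports no mapping, nothing useful returns */
	if (ioctl(fd, FIBMAP, &block) == -1)
		return -1;
	return block;
}
#endif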
965c8e59 | 2328 | static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) |
5559b8f4 MS |
2329 | { |
2330 | loff_t retval; | |
6131ffaa | 2331 | struct inode *inode = file_inode(file); |
5559b8f4 | 2332 | |
c07c3d19 | 2333 | /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ |
965c8e59 AM |
2334 | if (whence == SEEK_CUR || whence == SEEK_SET) |
2335 | return generic_file_llseek(file, offset, whence); | |
06222e49 | 2336 | |
c07c3d19 MS |
2337 | mutex_lock(&inode->i_mutex); |
2338 | retval = fuse_update_attributes(inode, NULL, file, NULL); | |
2339 | if (!retval) | |
965c8e59 | 2340 | retval = generic_file_llseek(file, offset, whence); |
5559b8f4 | 2341 | mutex_unlock(&inode->i_mutex); |
c07c3d19 | 2342 | |
5559b8f4 MS |
2343 | return retval; |
2344 | } | |
2345 | ||
59efec7b TH |
2346 | static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, |
2347 | unsigned int nr_segs, size_t bytes, bool to_user) | |
2348 | { | |
2349 | struct iov_iter ii; | |
2350 | int page_idx = 0; | |
2351 | ||
2352 | if (!bytes) | |
2353 | return 0; | |
2354 | ||
2355 | iov_iter_init(&ii, iov, nr_segs, bytes, 0); | |
2356 | ||
2357 | while (iov_iter_count(&ii)) { | |
2358 | struct page *page = pages[page_idx++]; | |
2359 | size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); | |
4aa0edd2 | 2360 | void *kaddr; |
59efec7b | 2361 | |
4aa0edd2 | 2362 | kaddr = kmap(page); |
59efec7b TH |
2363 | |
2364 | while (todo) { | |
2365 | char __user *uaddr = ii.iov->iov_base + ii.iov_offset; | |
2366 | size_t iov_len = ii.iov->iov_len - ii.iov_offset; | |
2367 | size_t copy = min(todo, iov_len); | |
2368 | size_t left; | |
2369 | ||
2370 | if (!to_user) | |
2371 | left = copy_from_user(kaddr, uaddr, copy); | |
2372 | else | |
2373 | left = copy_to_user(uaddr, kaddr, copy); | |
2374 | ||
2375 | if (unlikely(left)) | |
2376 | return -EFAULT; | |
2377 | ||
2378 | iov_iter_advance(&ii, copy); | |
2379 | todo -= copy; | |
2380 | kaddr += copy; | |
2381 | } | |
2382 | ||
0bd87182 | 2383 | kunmap(page); |
59efec7b TH |
2384 | } |
2385 | ||
2386 | return 0; | |
2387 | } | |
2388 | ||
d9d318d3 MS |
2389 | /* |
2390 | * CUSE servers compiled on 32bit broke on 64bit kernels because the | |
2391 | * ABI was defined to be 'struct iovec' which is different on 32bit | |
2392 | * and 64bit. Fortunately we can determine which structure the server | |
2393 | * used from the size of the reply. | |
2394 | */ | |
1baa26b2 MS |
2395 | static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, |
2396 | size_t transferred, unsigned count, | |
2397 | bool is_compat) | |
d9d318d3 MS |
2398 | { |
2399 | #ifdef CONFIG_COMPAT | |
2400 | if (count * sizeof(struct compat_iovec) == transferred) { | |
2401 | struct compat_iovec *ciov = src; | |
2402 | unsigned i; | |
2403 | ||
2404 | /* | |
2405 | * With this interface a 32bit server cannot support | |
2406 | * non-compat (i.e. ones coming from 64bit apps) ioctl | |
2407 | * requests | |
2408 | */ | |
2409 | if (!is_compat) | |
2410 | return -EINVAL; | |
2411 | ||
2412 | for (i = 0; i < count; i++) { | |
2413 | dst[i].iov_base = compat_ptr(ciov[i].iov_base); | |
2414 | dst[i].iov_len = ciov[i].iov_len; | |
2415 | } | |
2416 | return 0; | |
2417 | } | |
2418 | #endif | |
2419 | ||
2420 | if (count * sizeof(struct iovec) != transferred) | |
2421 | return -EIO; | |
2422 | ||
2423 | memcpy(dst, src, transferred); | |
2424 | return 0; | |
2425 | } | |
2426 | ||
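/*
 * [Editor's illustration, not part of fs/fuse/file.c] The disambiguation
 * above works because the two candidate ABIs have different element sizes:
 * a 32-bit iovec is two 32-bit fields (8 bytes), a 64-bit one two 64-bit
 * fields (16 bytes), so count * sizeof(elem) can only match one of them
 * for any nonzero count. Sketch of the two layouts:
 */
#if 0 /* illustrative layout definitions, not the kernel's own types */
#include <stdint.h>

struct iovec32 {		/* what a 32-bit server sends: 8 bytes */
	uint32_t iov_base;
	uint32_t iov_len;
};

struct iovec64 {		/* what a 64-bit server sends: 16 bytes */
	uint64_t iov_base;
	uint64_t iov_len;
};

/* transferred == count * 8  -> 32-bit layout (compat callers only)
 * transferred == count * 16 -> 64-bit layout */
#endif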
7572777e MS |
2427 | /* Make sure iov_length() won't overflow */ |
2428 | static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) | |
2429 | { | |
2430 | size_t n; | |
2431 | u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; | |
2432 | ||
fb6ccff6 | 2433 | for (n = 0; n < count; n++, iov++) { |
7572777e MS |
2434 | if (iov->iov_len > (size_t) max) |
2435 | return -ENOMEM; | |
2436 | max -= iov->iov_len; | |
2437 | } | |
2438 | return 0; | |
2439 | } | |
2440 | ||
1baa26b2 MS |
2441 | static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, |
2442 | void *src, size_t transferred, unsigned count, | |
2443 | bool is_compat) | |
2444 | { | |
2445 | unsigned i; | |
2446 | struct fuse_ioctl_iovec *fiov = src; | |
2447 | ||
2448 | if (fc->minor < 16) { | |
2449 | return fuse_copy_ioctl_iovec_old(dst, src, transferred, | |
2450 | count, is_compat); | |
2451 | } | |
2452 | ||
2453 | if (count * sizeof(struct fuse_ioctl_iovec) != transferred) | |
2454 | return -EIO; | |
2455 | ||
2456 | for (i = 0; i < count; i++) { | |
2457 | /* Did the server supply an inappropriate value? */ | |
2458 | if (fiov[i].base != (unsigned long) fiov[i].base || | |
2459 | fiov[i].len != (unsigned long) fiov[i].len) | |
2460 | return -EIO; | |
2461 | ||
2462 | dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; | |
2463 | dst[i].iov_len = (size_t) fiov[i].len; | |
2464 | ||
2465 | #ifdef CONFIG_COMPAT | |
2466 | if (is_compat && | |
2467 | (ptr_to_compat(dst[i].iov_base) != fiov[i].base || | |
2468 | (compat_size_t) dst[i].iov_len != fiov[i].len)) | |
2469 | return -EIO; | |
2470 | #endif | |
2471 | } | |
2472 | ||
2473 | return 0; | |
2474 | } | |
2475 | ||
2476 | ||
59efec7b TH |
2477 | /* |
2478 | * For ioctls, there is no generic way to determine how much memory | |
2479 | * needs to be read and/or written. Furthermore, ioctls are allowed | |
2480 | * to dereference the passed pointer, so the parameter requires deep | |
2481 | * copying but FUSE has no idea whatsoever about what to copy in or | |
2482 | * out. | |
2483 | * | |
2484 | * This is solved by allowing FUSE server to retry ioctl with | |
2485 | * necessary in/out iovecs. Let's assume the ioctl implementation | |
2486 | * needs to read in the following structure. | |
2487 | * | |
2488 | * struct a { | |
2489 | * char *buf; | |
2490 | * size_t buflen; | |
2491 | * } | |
2492 | * | |
2493 | * On the first callout to the FUSE server, inarg->in_size and | |
2494 | * inarg->out_size will be zero; then, the server completes the ioctl | |
2495 | * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and | |
2496 | * the actual iov array to | |
2497 | * | |
2498 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } | |
2499 | * | |
2500 | * which tells FUSE to copy in the requested area and retry the ioctl. | |
2501 | * On the second round, the server has access to the structure and | |
2502 | * from that it can tell what to look for next, so on this invocation, | |
2503 | * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov array to | |
2504 | * | |
2505 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, | |
2506 | * { .iov_base = a.buf, .iov_len = a.buflen } } | |
2507 | * | |
2508 | * FUSE will copy both struct a and the pointed buffer from the | |
2509 | * process doing the ioctl and retry ioctl with both struct a and the | |
2510 | * buffer. | |
2511 | * | |
2512 | * This time, FUSE server has everything it needs and completes ioctl | |
2513 | * without FUSE_IOCTL_RETRY which finishes the ioctl call. | |
2514 | * | |
2515 | * Copying data out works the same way. | |
2516 | * | |
2517 | * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel | |
2518 | * automatically initializes in and out iovs by decoding @cmd with | |
2519 | * _IOC_* macros and the server is not allowed to request RETRY. This | |
2520 | * limits ioctl data transfers to well-formed ioctls and is the forced | |
2521 | * behavior for all FUSE servers. | |
2522 | */ | |
08cbf542 TH |
2523 | long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, |
2524 | unsigned int flags) | |
59efec7b | 2525 | { |
59efec7b | 2526 | struct fuse_file *ff = file->private_data; |
d36f2487 | 2527 | struct fuse_conn *fc = ff->fc; |
59efec7b TH |
2528 | struct fuse_ioctl_in inarg = { |
2529 | .fh = ff->fh, | |
2530 | .cmd = cmd, | |
2531 | .arg = arg, | |
2532 | .flags = flags | |
2533 | }; | |
2534 | struct fuse_ioctl_out outarg; | |
2535 | struct fuse_req *req = NULL; | |
2536 | struct page **pages = NULL; | |
8ac83505 | 2537 | struct iovec *iov_page = NULL; |
59efec7b TH |
2538 | struct iovec *in_iov = NULL, *out_iov = NULL; |
2539 | unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; | |
2540 | size_t in_size, out_size, transferred; | |
2541 | int err; | |
2542 | ||
1baa26b2 MS |
2543 | #if BITS_PER_LONG == 32 |
2544 | inarg.flags |= FUSE_IOCTL_32BIT; | |
2545 | #else | |
2546 | if (flags & FUSE_IOCTL_COMPAT) | |
2547 | inarg.flags |= FUSE_IOCTL_32BIT; | |
2548 | #endif | |
2549 | ||
59efec7b | 2550 | /* assume all the iovs returned by the client always fit in a page */
1baa26b2 | 2551 | BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); |
59efec7b | 2552 | |
59efec7b | 2553 | err = -ENOMEM; |
c411cc88 | 2554 | pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); |
8ac83505 | 2555 | iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); |
59efec7b TH |
2556 | if (!pages || !iov_page) |
2557 | goto out; | |
2558 | ||
2559 | /* | |
2560 | * If restricted, initialize IO parameters as encoded in @cmd. | |
2561 | * RETRY from server is not allowed. | |
2562 | */ | |
2563 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { | |
8ac83505 | 2564 | struct iovec *iov = iov_page; |
59efec7b | 2565 | |
c9f0523d | 2566 | iov->iov_base = (void __user *)arg; |
59efec7b TH |
2567 | iov->iov_len = _IOC_SIZE(cmd); |
2568 | ||
2569 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | |
2570 | in_iov = iov; | |
2571 | in_iovs = 1; | |
2572 | } | |
2573 | ||
2574 | if (_IOC_DIR(cmd) & _IOC_READ) { | |
2575 | out_iov = iov; | |
2576 | out_iovs = 1; | |
2577 | } | |
2578 | } | |
2579 | ||
2580 | retry: | |
2581 | inarg.in_size = in_size = iov_length(in_iov, in_iovs); | |
2582 | inarg.out_size = out_size = iov_length(out_iov, out_iovs); | |
2583 | ||
2584 | /* | |
2585 | * Out data can be used either for actual out data or iovs; | |
2586 | * make sure there is always at least one page. | |
2587 | */ | |
2588 | out_size = max_t(size_t, out_size, PAGE_SIZE); | |
2589 | max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); | |
2590 | ||
2591 | /* make sure there are enough buffer pages and init request with them */ | |
2592 | err = -ENOMEM; | |
2593 | if (max_pages > FUSE_MAX_PAGES_PER_REQ) | |
2594 | goto out; | |
2595 | while (num_pages < max_pages) { | |
2596 | pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | |
2597 | if (!pages[num_pages]) | |
2598 | goto out; | |
2599 | num_pages++; | |
2600 | } | |
2601 | ||
54b96670 | 2602 | req = fuse_get_req(fc, num_pages); |
59efec7b TH |
2603 | if (IS_ERR(req)) { |
2604 | err = PTR_ERR(req); | |
2605 | req = NULL; | |
2606 | goto out; | |
2607 | } | |
2608 | memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); | |
2609 | req->num_pages = num_pages; | |
7c190c8b | 2610 | fuse_page_descs_length_init(req, 0, req->num_pages); |
59efec7b TH |
2611 | |
2612 | /* okay, let's send it to the client */ | |
2613 | req->in.h.opcode = FUSE_IOCTL; | |
d36f2487 | 2614 | req->in.h.nodeid = ff->nodeid; |
59efec7b TH |
2615 | req->in.numargs = 1; |
2616 | req->in.args[0].size = sizeof(inarg); | |
2617 | req->in.args[0].value = &inarg; | |
2618 | if (in_size) { | |
2619 | req->in.numargs++; | |
2620 | req->in.args[1].size = in_size; | |
2621 | req->in.argpages = 1; | |
2622 | ||
2623 | err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, | |
2624 | false); | |
2625 | if (err) | |
2626 | goto out; | |
2627 | } | |
2628 | ||
2629 | req->out.numargs = 2; | |
2630 | req->out.args[0].size = sizeof(outarg); | |
2631 | req->out.args[0].value = &outarg; | |
2632 | req->out.args[1].size = out_size; | |
2633 | req->out.argpages = 1; | |
2634 | req->out.argvar = 1; | |
2635 | ||
b93f858a | 2636 | fuse_request_send(fc, req); |
59efec7b TH |
2637 | err = req->out.h.error; |
2638 | transferred = req->out.args[1].size; | |
2639 | fuse_put_request(fc, req); | |
2640 | req = NULL; | |
2641 | if (err) | |
2642 | goto out; | |
2643 | ||
2644 | /* did it ask for retry? */ | |
2645 | if (outarg.flags & FUSE_IOCTL_RETRY) { | |
8ac83505 | 2646 | void *vaddr; |
59efec7b TH |
2647 | |
2648 | /* no retry if in restricted mode */ | |
2649 | err = -EIO; | |
2650 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) | |
2651 | goto out; | |
2652 | ||
2653 | in_iovs = outarg.in_iovs; | |
2654 | out_iovs = outarg.out_iovs; | |
2655 | ||
2656 | /* | |
2657 | * Make sure things are within bounds; the separate checks | |
2658 | * protect against overflow. | |
2659 | */ | |
2660 | err = -ENOMEM; | |
2661 | if (in_iovs > FUSE_IOCTL_MAX_IOV || | |
2662 | out_iovs > FUSE_IOCTL_MAX_IOV || | |
2663 | in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) | |
2664 | goto out; | |
2665 | ||
2408f6ef | 2666 | vaddr = kmap_atomic(pages[0]); |
1baa26b2 | 2667 | err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, |
d9d318d3 MS |
2668 | transferred, in_iovs + out_iovs, |
2669 | (flags & FUSE_IOCTL_COMPAT) != 0); | |
2408f6ef | 2670 | kunmap_atomic(vaddr); |
d9d318d3 MS |
2671 | if (err) |
2672 | goto out; | |
59efec7b | 2673 | |
8ac83505 | 2674 | in_iov = iov_page; |
59efec7b TH |
2675 | out_iov = in_iov + in_iovs; |
2676 | ||
7572777e MS |
2677 | err = fuse_verify_ioctl_iov(in_iov, in_iovs); |
2678 | if (err) | |
2679 | goto out; | |
2680 | ||
2681 | err = fuse_verify_ioctl_iov(out_iov, out_iovs); | |
2682 | if (err) | |
2683 | goto out; | |
2684 | ||
59efec7b TH |
2685 | goto retry; |
2686 | } | |
2687 | ||
2688 | err = -EIO; | |
2689 | if (transferred > inarg.out_size) | |
2690 | goto out; | |
2691 | ||
2692 | err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); | |
2693 | out: | |
2694 | if (req) | |
2695 | fuse_put_request(fc, req); | |
8ac83505 | 2696 | free_page((unsigned long) iov_page); |
59efec7b TH |
2697 | while (num_pages) |
2698 | __free_page(pages[--num_pages]); | |
2699 | kfree(pages); | |
2700 | ||
2701 | return err ? err : outarg.result; | |
2702 | } | |
08cbf542 | 2703 | EXPORT_SYMBOL_GPL(fuse_do_ioctl); |
59efec7b | 2704 | |
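/*
 * [Editor's illustration, not part of fs/fuse/file.c] In restricted mode
 * (FUSE_IOCTL_UNRESTRICTED clear) the kernel derives the in/out iovecs from
 * the _IOC_* encoding of the command, so only well-formed ioctls work. A
 * hypothetical command encoded that way from userspace:
 */
#if 0 /* illustrative userspace code; MYFS_GETINFO is made up */
#include <sys/ioctl.h>

struct myfs_info {
	unsigned long generation;
	unsigned long flags;
};

/* _IOR encodes direction=read and size=sizeof(struct myfs_info) into cmd,
 * which is exactly what the restricted path decodes with _IOC_DIR/_IOC_SIZE */
#define MYFS_GETINFO	_IOR('M', 0x01, struct myfs_info)

static int get_info(int fd, struct myfs_info *info)
{
	return ioctl(fd, MYFS_GETINFO, info);
}
#endif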
b18da0c5 MS |
2705 | long fuse_ioctl_common(struct file *file, unsigned int cmd, |
2706 | unsigned long arg, unsigned int flags) | |
d36f2487 | 2707 | { |
6131ffaa | 2708 | struct inode *inode = file_inode(file); |
d36f2487 MS |
2709 | struct fuse_conn *fc = get_fuse_conn(inode); |
2710 | ||
c2132c1b | 2711 | if (!fuse_allow_current_process(fc)) |
d36f2487 MS |
2712 | return -EACCES; |
2713 | ||
2714 | if (is_bad_inode(inode)) | |
2715 | return -EIO; | |
2716 | ||
2717 | return fuse_do_ioctl(file, cmd, arg, flags); | |
2718 | } | |
2719 | ||
59efec7b TH |
2720 | static long fuse_file_ioctl(struct file *file, unsigned int cmd, |
2721 | unsigned long arg) | |
2722 | { | |
b18da0c5 | 2723 | return fuse_ioctl_common(file, cmd, arg, 0); |
59efec7b TH |
2724 | } |
2725 | ||
2726 | static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, | |
2727 | unsigned long arg) | |
2728 | { | |
b18da0c5 | 2729 | return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); |
59efec7b TH |
2730 | } |
2731 | ||
95668a69 TH |
2732 | /* |
2733 | * All files which have been polled are linked into the RB tree | |
2734 | * fuse_conn->polled_files, which is indexed by kh. Walk the tree and | |
2735 | * find the matching one. | |
2736 | */ | |
2737 | static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, | |
2738 | struct rb_node **parent_out) | |
2739 | { | |
2740 | struct rb_node **link = &fc->polled_files.rb_node; | |
2741 | struct rb_node *last = NULL; | |
2742 | ||
2743 | while (*link) { | |
2744 | struct fuse_file *ff; | |
2745 | ||
2746 | last = *link; | |
2747 | ff = rb_entry(last, struct fuse_file, polled_node); | |
2748 | ||
2749 | if (kh < ff->kh) | |
2750 | link = &last->rb_left; | |
2751 | else if (kh > ff->kh) | |
2752 | link = &last->rb_right; | |
2753 | else | |
2754 | return link; | |
2755 | } | |
2756 | ||
2757 | if (parent_out) | |
2758 | *parent_out = last; | |
2759 | return link; | |
2760 | } | |
2761 | ||
2762 | /* | |
2763 | * The file is about to be polled. Make sure it's on the polled_files | |
2764 | * RB tree. Note that files once added to the polled_files tree are | |
2765 | * not removed before the file is released. This is because a file | |
2766 | * polled once is likely to be polled again. | |
2767 | */ | |
2768 | static void fuse_register_polled_file(struct fuse_conn *fc, | |
2769 | struct fuse_file *ff) | |
2770 | { | |
2771 | spin_lock(&fc->lock); | |
2772 | if (RB_EMPTY_NODE(&ff->polled_node)) { | |
f3846266 | 2773 | struct rb_node **link, *uninitialized_var(parent); |
95668a69 TH |
2774 | |
2775 | link = fuse_find_polled_node(fc, ff->kh, &parent); | |
2776 | BUG_ON(*link); | |
2777 | rb_link_node(&ff->polled_node, parent, link); | |
2778 | rb_insert_color(&ff->polled_node, &fc->polled_files); | |
2779 | } | |
2780 | spin_unlock(&fc->lock); | |
2781 | } | |
2782 | ||
08cbf542 | 2783 | unsigned fuse_file_poll(struct file *file, poll_table *wait) |
95668a69 | 2784 | { |
95668a69 | 2785 | struct fuse_file *ff = file->private_data; |
797759aa | 2786 | struct fuse_conn *fc = ff->fc; |
95668a69 TH |
2787 | struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; |
2788 | struct fuse_poll_out outarg; | |
2789 | struct fuse_req *req; | |
2790 | int err; | |
2791 | ||
2792 | if (fc->no_poll) | |
2793 | return DEFAULT_POLLMASK; | |
2794 | ||
2795 | poll_wait(file, &ff->poll_wait, wait); | |
0415d291 | 2796 | inarg.events = (__u32)poll_requested_events(wait); |
95668a69 TH |
2797 | |
2798 | /* | |
2799 | * Ask for notification iff there's someone waiting for it. | |
2800 | * The client may ignore the flag and always notify. | |
2801 | */ | |
2802 | if (waitqueue_active(&ff->poll_wait)) { | |
2803 | inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; | |
2804 | fuse_register_polled_file(fc, ff); | |
2805 | } | |
2806 | ||
b111c8c0 | 2807 | req = fuse_get_req_nopages(fc); |
95668a69 | 2808 | if (IS_ERR(req)) |
201fa69a | 2809 | return POLLERR; |
95668a69 TH |
2810 | |
2811 | req->in.h.opcode = FUSE_POLL; | |
797759aa | 2812 | req->in.h.nodeid = ff->nodeid; |
95668a69 TH |
2813 | req->in.numargs = 1; |
2814 | req->in.args[0].size = sizeof(inarg); | |
2815 | req->in.args[0].value = &inarg; | |
2816 | req->out.numargs = 1; | |
2817 | req->out.args[0].size = sizeof(outarg); | |
2818 | req->out.args[0].value = &outarg; | |
b93f858a | 2819 | fuse_request_send(fc, req); |
95668a69 TH |
2820 | err = req->out.h.error; |
2821 | fuse_put_request(fc, req); | |
2822 | ||
2823 | if (!err) | |
2824 | return outarg.revents; | |
2825 | if (err == -ENOSYS) { | |
2826 | fc->no_poll = 1; | |
2827 | return DEFAULT_POLLMASK; | |
2828 | } | |
2829 | return POLLERR; | |
2830 | } | |
08cbf542 | 2831 | EXPORT_SYMBOL_GPL(fuse_file_poll); |
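/*
 * [Editor's illustration, not part of fs/fuse/file.c] fuse_file_poll() is
 * what a plain poll(2) on a FUSE/CUSE file descriptor ends up calling. A
 * minimal userspace sketch waiting for readability:
 */
#if 0 /* illustrative userspace code */
#include <poll.h>

static int wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;	/* 0 = timeout, -1 = error */
	return (pfd.revents & POLLIN) ? 1 : 0;
}
#endif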
95668a69 TH |
2832 | |
2833 | /* | |
2834 | * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and | |
2835 | * wakes up the poll waiters. | |
2836 | */ | |
2837 | int fuse_notify_poll_wakeup(struct fuse_conn *fc, | |
2838 | struct fuse_notify_poll_wakeup_out *outarg) | |
2839 | { | |
2840 | u64 kh = outarg->kh; | |
2841 | struct rb_node **link; | |
2842 | ||
2843 | spin_lock(&fc->lock); | |
2844 | ||
2845 | link = fuse_find_polled_node(fc, kh, NULL); | |
2846 | if (*link) { | |
2847 | struct fuse_file *ff; | |
2848 | ||
2849 | ff = rb_entry(*link, struct fuse_file, polled_node); | |
2850 | wake_up_interruptible_sync(&ff->poll_wait); | |
2851 | } | |
2852 | ||
2853 | spin_unlock(&fc->lock); | |
2854 | return 0; | |
2855 | } | |
2856 | ||
efb9fa9e MP |
2857 | static void fuse_do_truncate(struct file *file) |
2858 | { | |
2859 | struct inode *inode = file->f_mapping->host; | |
2860 | struct iattr attr; | |
2861 | ||
2862 | attr.ia_valid = ATTR_SIZE; | |
2863 | attr.ia_size = i_size_read(inode); | |
2864 | ||
2865 | attr.ia_file = file; | |
2866 | attr.ia_valid |= ATTR_FILE; | |
2867 | ||
2868 | fuse_do_setattr(inode, &attr, file); | |
2869 | } | |
2870 | ||
e5c5f05d MP |
2871 | static inline loff_t fuse_round_up(loff_t off) |
2872 | { | |
2873 | return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); | |
2874 | } | |
2875 | ||
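/*
 * [Editor's illustration, not part of fs/fuse/file.c] With the common values
 * FUSE_MAX_PAGES_PER_REQ = 32 and PAGE_SHIFT = 12, fuse_round_up() rounds to
 * the next 128 KiB boundary, i.e. the largest single request. Worked out:
 */
#if 0 /* illustrative sketch of the rounding */
#include <stdint.h>

static uint64_t round_up_example(uint64_t off)
{
	uint64_t step = 32u << 12;		/* 131072 = 128 KiB */

	/* e.g. off = 1      -> 131072
	 *      off = 131072 -> 131072
	 *      off = 131073 -> 262144 */
	return (off + step - 1) / step * step;
}
#endif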
4273b793 AA |
2876 | static ssize_t |
2877 | fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |
2878 | loff_t offset, unsigned long nr_segs) | |
2879 | { | |
2880 | ssize_t ret = 0; | |
60b9df7a MS |
2881 | struct file *file = iocb->ki_filp; |
2882 | struct fuse_file *ff = file->private_data; | |
e5c5f05d | 2883 | bool async_dio = ff->fc->async_dio; |
4273b793 | 2884 | loff_t pos = 0; |
bcba24cc MP |
2885 | struct inode *inode; |
2886 | loff_t i_size; | |
2887 | size_t count = iov_length(iov, nr_segs); | |
36cf66ed | 2888 | struct fuse_io_priv *io; |
4273b793 | 2889 | |
4273b793 | 2890 | pos = offset; |
bcba24cc MP |
2891 | inode = file->f_mapping->host; |
2892 | i_size = i_size_read(inode); | |
4273b793 | 2893 | |
9fe55eea SW |
2894 | if ((rw == READ) && (offset > i_size)) |
2895 | return 0; | |
2896 | ||
439ee5f0 | 2897 | /* optimization for short read */ |
e5c5f05d | 2898 | if (async_dio && rw != WRITE && offset + count > i_size) { |
439ee5f0 MP |
2899 | if (offset >= i_size) |
2900 | return 0; | |
e5c5f05d | 2901 | count = min_t(loff_t, count, fuse_round_up(i_size - offset)); |
439ee5f0 MP |
2902 | } |
2903 | ||
bcba24cc | 2904 | io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); |
36cf66ed MP |
2905 | if (!io) |
2906 | return -ENOMEM; | |
bcba24cc MP |
2907 | spin_lock_init(&io->lock); |
2908 | io->reqs = 1; | |
2909 | io->bytes = -1; | |
2910 | io->size = 0; | |
2911 | io->offset = offset; | |
2912 | io->write = (rw == WRITE); | |
2913 | io->err = 0; | |
36cf66ed | 2914 | io->file = file; |
bcba24cc MP |
2915 | /* |
2916 | * By default, we want to optimize all I/Os with async request | |
60b9df7a | 2917 | * submission to the client filesystem if supported. |
bcba24cc | 2918 | */ |
e5c5f05d | 2919 | io->async = async_dio; |
bcba24cc MP |
2920 | io->iocb = iocb; |
2921 | ||
2922 | /* | |
2923 | * We cannot asynchronously extend the size of a file. We have no method | |
2924 | * to wait on real async I/O requests, so we must submit this request | |
2925 | * synchronously. | |
2926 | */ | |
e5c5f05d | 2927 | if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) |
bcba24cc | 2928 | io->async = false; |
4273b793 | 2929 | |
b98d023a | 2930 | if (rw == WRITE) |
36cf66ed | 2931 | ret = __fuse_direct_write(io, iov, nr_segs, &pos); |
b98d023a | 2932 | else |
439ee5f0 | 2933 | ret = __fuse_direct_read(io, iov, nr_segs, &pos, count); |
36cf66ed | 2934 | |
bcba24cc MP |
2935 | if (io->async) { |
2936 | fuse_aio_complete(io, ret < 0 ? ret : 0, -1); | |
2937 | ||
2938 | /* we have a non-extending, async request, so return */ | |
c9ecf989 | 2939 | if (!is_sync_kiocb(iocb)) |
bcba24cc MP |
2940 | return -EIOCBQUEUED; |
2941 | ||
2942 | ret = wait_on_sync_kiocb(iocb); | |
2943 | } else { | |
2944 | kfree(io); | |
2945 | } | |
2946 | ||
efb9fa9e MP |
2947 | if (rw == WRITE) { |
2948 | if (ret > 0) | |
2949 | fuse_write_update_size(inode, pos); | |
2950 | else if (ret < 0 && offset + count > i_size) | |
2951 | fuse_do_truncate(file); | |
2952 | } | |
4273b793 AA |
2953 | |
2954 | return ret; | |
2955 | } | |
2956 | ||
cdadb11c MS |
2957 | static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
2958 | loff_t length) | |
05ba1f08 AP |
2959 | { |
2960 | struct fuse_file *ff = file->private_data; | |
3634a632 | 2961 | struct inode *inode = file->f_inode; |
0ab08f57 | 2962 | struct fuse_inode *fi = get_fuse_inode(inode); |
05ba1f08 AP |
2963 | struct fuse_conn *fc = ff->fc; |
2964 | struct fuse_req *req; | |
2965 | struct fuse_fallocate_in inarg = { | |
2966 | .fh = ff->fh, | |
2967 | .offset = offset, | |
2968 | .length = length, | |
2969 | .mode = mode | |
2970 | }; | |
2971 | int err; | |
14c14414 MP |
2972 | bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || |
2973 | (mode & FALLOC_FL_PUNCH_HOLE); | |
05ba1f08 | 2974 | |
519c6040 MS |
2975 | if (fc->no_fallocate) |
2976 | return -EOPNOTSUPP; | |
2977 | ||
14c14414 | 2978 | if (lock_inode) { |
3634a632 | 2979 | mutex_lock(&inode->i_mutex); |
bde52788 MP |
2980 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
2981 | loff_t endbyte = offset + length - 1; | |
2982 | err = filemap_write_and_wait_range(inode->i_mapping, | |
2983 | offset, endbyte); | |
2984 | if (err) | |
2985 | goto out; | |
2986 | ||
2987 | fuse_sync_writes(inode); | |
2988 | } | |
3634a632 BF |
2989 | } |
2990 | ||
0ab08f57 MP |
2991 | if (!(mode & FALLOC_FL_KEEP_SIZE)) |
2992 | set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); | |
2993 | ||
b111c8c0 | 2994 | req = fuse_get_req_nopages(fc); |
3634a632 BF |
2995 | if (IS_ERR(req)) { |
2996 | err = PTR_ERR(req); | |
2997 | goto out; | |
2998 | } | |
05ba1f08 AP |
2999 | |
3000 | req->in.h.opcode = FUSE_FALLOCATE; | |
3001 | req->in.h.nodeid = ff->nodeid; | |
3002 | req->in.numargs = 1; | |
3003 | req->in.args[0].size = sizeof(inarg); | |
3004 | req->in.args[0].value = &inarg; | |
3005 | fuse_request_send(fc, req); | |
3006 | err = req->out.h.error; | |
519c6040 MS |
3007 | if (err == -ENOSYS) { |
3008 | fc->no_fallocate = 1; | |
3009 | err = -EOPNOTSUPP; | |
3010 | } | |
05ba1f08 AP |
3011 | fuse_put_request(fc, req); |
3012 | ||
bee6c307 BF |
3013 | if (err) |
3014 | goto out; | |
3015 | ||
3016 | /* we could have extended the file */ | |
b0aa7606 MP |
3017 | if (!(mode & FALLOC_FL_KEEP_SIZE)) { |
3018 | bool changed = fuse_write_update_size(inode, offset + length); | |
3019 | ||
3020 | if (changed && fc->writeback_cache) { | |
3021 | struct fuse_inode *fi = get_fuse_inode(inode); | |
3022 | ||
3023 | inode->i_mtime = current_fs_time(inode->i_sb); | |
3024 | set_bit(FUSE_I_MTIME_DIRTY, &fi->state); | |
3025 | } | |
3026 | } | |
bee6c307 BF |
3027 | |
3028 | if (mode & FALLOC_FL_PUNCH_HOLE) | |
3029 | truncate_pagecache_range(inode, offset, offset + length - 1); | |
3030 | ||
3031 | fuse_invalidate_attr(inode); | |
3032 | ||
3634a632 | 3033 | out: |
0ab08f57 MP |
3034 | if (!(mode & FALLOC_FL_KEEP_SIZE)) |
3035 | clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); | |
3036 | ||
bde52788 | 3037 | if (lock_inode) |
3634a632 | 3038 | mutex_unlock(&inode->i_mutex); |
3634a632 | 3039 | |
05ba1f08 AP |
3040 | return err; |
3041 | } | |
05ba1f08 | 3042 | |
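/*
 * [Editor's illustration, not part of fs/fuse/file.c] fuse_file_fallocate()
 * implements fallocate(2); the FALLOC_FL_PUNCH_HOLE branch above is reached
 * together with KEEP_SIZE, as Linux requires for hole punching. Userspace
 * sketch:
 */
#if 0 /* illustrative userspace code */
#define _GNU_SOURCE
#include <fcntl.h>

static int punch_4k_hole(int fd, off_t offset)
{
	/* PUNCH_HOLE must be combined with KEEP_SIZE */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, 4096);
}
#endif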
4b6f5d20 | 3043 | static const struct file_operations fuse_file_operations = { |
5559b8f4 | 3044 | .llseek = fuse_file_llseek, |
543ade1f | 3045 | .read = do_sync_read, |
bcb4be80 | 3046 | .aio_read = fuse_file_aio_read, |
543ade1f | 3047 | .write = do_sync_write, |
ea9b9907 | 3048 | .aio_write = fuse_file_aio_write, |
b6aeaded MS |
3049 | .mmap = fuse_file_mmap, |
3050 | .open = fuse_open, | |
3051 | .flush = fuse_flush, | |
3052 | .release = fuse_release, | |
3053 | .fsync = fuse_fsync, | |
71421259 | 3054 | .lock = fuse_file_lock, |
a9ff4f87 | 3055 | .flock = fuse_file_flock, |
5ffc4ef4 | 3056 | .splice_read = generic_file_splice_read, |
59efec7b TH |
3057 | .unlocked_ioctl = fuse_file_ioctl, |
3058 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 3059 | .poll = fuse_file_poll, |
05ba1f08 | 3060 | .fallocate = fuse_file_fallocate, |
b6aeaded MS |
3061 | }; |
3062 | ||
4b6f5d20 | 3063 | static const struct file_operations fuse_direct_io_file_operations = { |
5559b8f4 | 3064 | .llseek = fuse_file_llseek, |
413ef8cb MS |
3065 | .read = fuse_direct_read, |
3066 | .write = fuse_direct_write, | |
fc280c96 | 3067 | .mmap = fuse_direct_mmap, |
413ef8cb MS |
3068 | .open = fuse_open, |
3069 | .flush = fuse_flush, | |
3070 | .release = fuse_release, | |
3071 | .fsync = fuse_fsync, | |
71421259 | 3072 | .lock = fuse_file_lock, |
a9ff4f87 | 3073 | .flock = fuse_file_flock, |
59efec7b TH |
3074 | .unlocked_ioctl = fuse_file_ioctl, |
3075 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 3076 | .poll = fuse_file_poll, |
05ba1f08 | 3077 | .fallocate = fuse_file_fallocate, |
fc280c96 | 3078 | /* no splice_read */ |
413ef8cb MS |
3079 | }; |
3080 | ||
f5e54d6e | 3081 | static const struct address_space_operations fuse_file_aops = { |
b6aeaded | 3082 | .readpage = fuse_readpage, |
3be5a52b | 3083 | .writepage = fuse_writepage, |
26d614df | 3084 | .writepages = fuse_writepages, |
3be5a52b | 3085 | .launder_page = fuse_launder_page, |
db50b96c | 3086 | .readpages = fuse_readpages, |
3be5a52b | 3087 | .set_page_dirty = __set_page_dirty_nobuffers, |
b2d2272f | 3088 | .bmap = fuse_bmap, |
4273b793 | 3089 | .direct_IO = fuse_direct_IO, |
6b12c1b3 PE |
3090 | .write_begin = fuse_write_begin, |
3091 | .write_end = fuse_write_end, | |
b6aeaded MS |
3092 | }; |
3093 | ||
3094 | void fuse_init_file_inode(struct inode *inode) | |
3095 | { | |
45323fb7 MS |
3096 | inode->i_fop = &fuse_file_operations; |
3097 | inode->i_data.a_ops = &fuse_file_aops; | |
b6aeaded | 3098 | } |