Commit | Line | Data |
---|---|---|
b6aeaded MS |
1 | /* |
2 | FUSE: Filesystem in Userspace | |
1729a16c | 3 | Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> |
b6aeaded MS |
4 | |
5 | This program can be distributed under the terms of the GNU GPL. | |
6 | See the file COPYING. | |
7 | */ | |
8 | ||
9 | #include "fuse_i.h" | |
10 | ||
11 | #include <linux/pagemap.h> | |
12 | #include <linux/slab.h> | |
13 | #include <linux/kernel.h> | |
e8edc6e0 | 14 | #include <linux/sched.h> |
08cbf542 | 15 | #include <linux/module.h> |
d9d318d3 | 16 | #include <linux/compat.h> |
478e0841 | 17 | #include <linux/swap.h> |
a27bb332 | 18 | #include <linux/aio.h> |
3634a632 | 19 | #include <linux/falloc.h> |
b6aeaded | 20 | |
4b6f5d20 | 21 | static const struct file_operations fuse_direct_io_file_operations; |
45323fb7 | 22 | |
91fe96b4 MS |
23 | static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
24 | int opcode, struct fuse_open_out *outargp) | |
b6aeaded | 25 | { |
b6aeaded | 26 | struct fuse_open_in inarg; |
fd72faac MS |
27 | struct fuse_req *req; |
28 | int err; | |
29 | ||
b111c8c0 | 30 | req = fuse_get_req_nopages(fc); |
ce1d5a49 MS |
31 | if (IS_ERR(req)) |
32 | return PTR_ERR(req); | |
fd72faac MS |
33 | |
34 | memset(&inarg, 0, sizeof(inarg)); | |
6ff958ed MS |
35 | inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); |
36 | if (!fc->atomic_o_trunc) | |
37 | inarg.flags &= ~O_TRUNC; | |
91fe96b4 MS |
38 | req->in.h.opcode = opcode; |
39 | req->in.h.nodeid = nodeid; | |
fd72faac MS |
40 | req->in.numargs = 1; |
41 | req->in.args[0].size = sizeof(inarg); | |
42 | req->in.args[0].value = &inarg; | |
43 | req->out.numargs = 1; | |
44 | req->out.args[0].size = sizeof(*outargp); | |
45 | req->out.args[0].value = outargp; | |
b93f858a | 46 | fuse_request_send(fc, req); |
fd72faac MS |
47 | err = req->out.h.error; |
48 | fuse_put_request(fc, req); | |
49 | ||
50 | return err; | |
51 | } | |
52 | ||
acf99433 | 53 | struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) |
fd72faac MS |
54 | { |
55 | struct fuse_file *ff; | |
6b2db28a | 56 | |
fd72faac | 57 | ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); |
6b2db28a TH |
58 | if (unlikely(!ff)) |
59 | return NULL; | |
60 | ||
da5e4714 | 61 | ff->fc = fc; |
4250c066 | 62 | ff->reserved_req = fuse_request_alloc(0); |
6b2db28a TH |
63 | if (unlikely(!ff->reserved_req)) { |
64 | kfree(ff); | |
65 | return NULL; | |
fd72faac | 66 | } |
6b2db28a TH |
67 | |
68 | INIT_LIST_HEAD(&ff->write_entry); | |
69 | atomic_set(&ff->count, 0); | |
70 | RB_CLEAR_NODE(&ff->polled_node); | |
71 | init_waitqueue_head(&ff->poll_wait); | |
72 | ||
73 | spin_lock(&fc->lock); | |
74 | ff->kh = ++fc->khctr; | |
75 | spin_unlock(&fc->lock); | |
76 | ||
fd72faac MS |
77 | return ff; |
78 | } | |
79 | ||
80 | void fuse_file_free(struct fuse_file *ff) | |
81 | { | |
33649c91 | 82 | fuse_request_free(ff->reserved_req); |
fd72faac MS |
83 | kfree(ff); |
84 | } | |
85 | ||
c7b7143c | 86 | struct fuse_file *fuse_file_get(struct fuse_file *ff) |
c756e0a4 MS |
87 | { |
88 | atomic_inc(&ff->count); | |
89 | return ff; | |
90 | } | |
91 | ||
5a18ec17 MS |
92 | static void fuse_release_async(struct work_struct *work) |
93 | { | |
94 | struct fuse_req *req; | |
95 | struct fuse_conn *fc; | |
96 | struct path path; | |
97 | ||
98 | req = container_of(work, struct fuse_req, misc.release.work); | |
99 | path = req->misc.release.path; | |
100 | fc = get_fuse_conn(path.dentry->d_inode); | |
101 | ||
102 | fuse_put_request(fc, req); | |
103 | path_put(&path); | |
104 | } | |
105 | ||
819c4b3b MS |
106 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) |
107 | { | |
5a18ec17 MS |
108 | if (fc->destroy_req) { |
109 | /* | |
110 | * If this is a fuseblk mount, then it's possible that | |
111 | * releasing the path will result in releasing the | |
112 | * super block and sending the DESTROY request. If | |
113 | * the server is single threaded, this would hang. | |
114 | * For this reason do the path_put() in a separate | |
115 | * thread. | |
116 | */ | |
117 | atomic_inc(&req->count); | |
118 | INIT_WORK(&req->misc.release.work, fuse_release_async); | |
119 | schedule_work(&req->misc.release.work); | |
120 | } else { | |
121 | path_put(&req->misc.release.path); | |
122 | } | |
819c4b3b MS |
123 | } |
124 | ||
5a18ec17 | 125 | static void fuse_file_put(struct fuse_file *ff, bool sync) |
c756e0a4 MS |
126 | { |
127 | if (atomic_dec_and_test(&ff->count)) { | |
128 | struct fuse_req *req = ff->reserved_req; | |
8b0797a4 | 129 | |
5a18ec17 | 130 | if (sync) { |
8b41e671 | 131 | req->background = 0; |
5a18ec17 MS |
132 | fuse_request_send(ff->fc, req); |
133 | path_put(&req->misc.release.path); | |
134 | fuse_put_request(ff->fc, req); | |
135 | } else { | |
136 | req->end = fuse_release_end; | |
8b41e671 | 137 | req->background = 1; |
5a18ec17 MS |
138 | fuse_request_send_background(ff->fc, req); |
139 | } | |
c756e0a4 MS |
140 | kfree(ff); |
141 | } | |
142 | } | |
143 | ||
08cbf542 TH |
144 | int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
145 | bool isdir) | |
91fe96b4 MS |
146 | { |
147 | struct fuse_open_out outarg; | |
148 | struct fuse_file *ff; | |
149 | int err; | |
150 | int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; | |
151 | ||
152 | ff = fuse_file_alloc(fc); | |
153 | if (!ff) | |
154 | return -ENOMEM; | |
155 | ||
156 | err = fuse_send_open(fc, nodeid, file, opcode, &outarg); | |
157 | if (err) { | |
158 | fuse_file_free(ff); | |
159 | return err; | |
160 | } | |
161 | ||
162 | if (isdir) | |
163 | outarg.open_flags &= ~FOPEN_DIRECT_IO; | |
164 | ||
165 | ff->fh = outarg.fh; | |
166 | ff->nodeid = nodeid; | |
167 | ff->open_flags = outarg.open_flags; | |
168 | file->private_data = fuse_file_get(ff); | |
169 | ||
170 | return 0; | |
171 | } | |
08cbf542 | 172 | EXPORT_SYMBOL_GPL(fuse_do_open); |
91fe96b4 | 173 | |
c7b7143c | 174 | void fuse_finish_open(struct inode *inode, struct file *file) |
fd72faac | 175 | { |
c7b7143c | 176 | struct fuse_file *ff = file->private_data; |
a0822c55 | 177 | struct fuse_conn *fc = get_fuse_conn(inode); |
c7b7143c MS |
178 | |
179 | if (ff->open_flags & FOPEN_DIRECT_IO) | |
fd72faac | 180 | file->f_op = &fuse_direct_io_file_operations; |
c7b7143c | 181 | if (!(ff->open_flags & FOPEN_KEEP_CACHE)) |
b1009979 | 182 | invalidate_inode_pages2(inode->i_mapping); |
c7b7143c | 183 | if (ff->open_flags & FOPEN_NONSEEKABLE) |
a7c1b990 | 184 | nonseekable_open(inode, file); |
a0822c55 KS |
185 | if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { |
186 | struct fuse_inode *fi = get_fuse_inode(inode); | |
187 | ||
188 | spin_lock(&fc->lock); | |
189 | fi->attr_version = ++fc->attr_version; | |
190 | i_size_write(inode, 0); | |
191 | spin_unlock(&fc->lock); | |
192 | fuse_invalidate_attr(inode); | |
193 | } | |
fd72faac MS |
194 | } |
195 | ||
91fe96b4 | 196 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) |
fd72faac | 197 | { |
acf99433 | 198 | struct fuse_conn *fc = get_fuse_conn(inode); |
b6aeaded | 199 | int err; |
b6aeaded MS |
200 | |
201 | err = generic_file_open(inode, file); | |
202 | if (err) | |
203 | return err; | |
204 | ||
91fe96b4 | 205 | err = fuse_do_open(fc, get_node_id(inode), file, isdir); |
fd72faac | 206 | if (err) |
91fe96b4 | 207 | return err; |
b6aeaded | 208 | |
91fe96b4 MS |
209 | fuse_finish_open(inode, file); |
210 | ||
211 | return 0; | |
b6aeaded MS |
212 | } |
213 | ||
8b0797a4 | 214 | static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) |
64c6d8ed | 215 | { |
8b0797a4 | 216 | struct fuse_conn *fc = ff->fc; |
33649c91 | 217 | struct fuse_req *req = ff->reserved_req; |
b57d4264 | 218 | struct fuse_release_in *inarg = &req->misc.release.in; |
b6aeaded | 219 | |
8b0797a4 MS |
220 | spin_lock(&fc->lock); |
221 | list_del(&ff->write_entry); | |
222 | if (!RB_EMPTY_NODE(&ff->polled_node)) | |
223 | rb_erase(&ff->polled_node, &fc->polled_files); | |
224 | spin_unlock(&fc->lock); | |
225 | ||
357ccf2b | 226 | wake_up_interruptible_all(&ff->poll_wait); |
8b0797a4 | 227 | |
b6aeaded | 228 | inarg->fh = ff->fh; |
fd72faac | 229 | inarg->flags = flags; |
51eb01e7 | 230 | req->in.h.opcode = opcode; |
c7b7143c | 231 | req->in.h.nodeid = ff->nodeid; |
b6aeaded MS |
232 | req->in.numargs = 1; |
233 | req->in.args[0].size = sizeof(struct fuse_release_in); | |
234 | req->in.args[0].value = inarg; | |
fd72faac MS |
235 | } |
236 | ||
8b0797a4 | 237 | void fuse_release_common(struct file *file, int opcode) |
fd72faac | 238 | { |
6b2db28a TH |
239 | struct fuse_file *ff; |
240 | struct fuse_req *req; | |
b6aeaded | 241 | |
6b2db28a TH |
242 | ff = file->private_data; |
243 | if (unlikely(!ff)) | |
8b0797a4 | 244 | return; |
6b2db28a | 245 | |
6b2db28a | 246 | req = ff->reserved_req; |
8b0797a4 | 247 | fuse_prepare_release(ff, file->f_flags, opcode); |
6b2db28a | 248 | |
37fb3a30 MS |
249 | if (ff->flock) { |
250 | struct fuse_release_in *inarg = &req->misc.release.in; | |
251 | inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; | |
252 | inarg->lock_owner = fuse_lock_owner_id(ff->fc, | |
253 | (fl_owner_t) file); | |
254 | } | |
6b2db28a | 255 | /* Hold vfsmount and dentry until release is finished */ |
b0be46eb MS |
256 | path_get(&file->f_path); |
257 | req->misc.release.path = file->f_path; | |
6b2db28a | 258 | |
6b2db28a TH |
259 | /* |
260 | * Normally this will send the RELEASE request; however, if | |
261 | * some asynchronous READ or WRITE requests are outstanding, | |
262 | * the sending will be delayed. | |
5a18ec17 MS |
263 | * |
264 | * Make the release synchronous if this is a fuseblk mount; | |
265 | * synchronous RELEASE is allowed (and desirable) in this case | |
266 | * because the server can be trusted not to screw up. | |
6b2db28a | 267 | */ |
5a18ec17 | 268 | fuse_file_put(ff, ff->fc->destroy_req != NULL); |
b6aeaded MS |
269 | } |
270 | ||
04730fef MS |
271 | static int fuse_open(struct inode *inode, struct file *file) |
272 | { | |
91fe96b4 | 273 | return fuse_open_common(inode, file, false); |
04730fef MS |
274 | } |
275 | ||
276 | static int fuse_release(struct inode *inode, struct file *file) | |
277 | { | |
8b0797a4 MS |
278 | fuse_release_common(file, FUSE_RELEASE); |
279 | ||
280 | /* return value is ignored by VFS */ | |
281 | return 0; | |
282 | } | |
283 | ||
284 | void fuse_sync_release(struct fuse_file *ff, int flags) | |
285 | { | |
286 | WARN_ON(atomic_read(&ff->count) > 1); | |
287 | fuse_prepare_release(ff, flags, FUSE_RELEASE); | |
288 | ff->reserved_req->force = 1; | |
8b41e671 | 289 | ff->reserved_req->background = 0; |
8b0797a4 MS |
290 | fuse_request_send(ff->fc, ff->reserved_req); |
291 | fuse_put_request(ff->fc, ff->reserved_req); | |
292 | kfree(ff); | |
04730fef | 293 | } |
08cbf542 | 294 | EXPORT_SYMBOL_GPL(fuse_sync_release); |
04730fef | 295 | |
71421259 | 296 | /* |
9c8ef561 MS |
297 | * Scramble the ID space with XTEA, so that the value of the files_struct |
298 | * pointer is not exposed to userspace. | |
71421259 | 299 | */ |
f3332114 | 300 | u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) |
71421259 | 301 | { |
9c8ef561 MS |
302 | u32 *k = fc->scramble_key; |
303 | u64 v = (unsigned long) id; | |
304 | u32 v0 = v; | |
305 | u32 v1 = v >> 32; | |
306 | u32 sum = 0; | |
307 | int i; | |
308 | ||
309 | for (i = 0; i < 32; i++) { | |
310 | v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]); | |
311 | sum += 0x9E3779B9; | |
312 | v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]); | |
313 | } | |
314 | ||
315 | return (u64) v0 + ((u64) v1 << 32); | |
71421259 MS |
316 | } |
317 | ||
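
For reference, the scrambling above is a plain 32-round XTEA encipherment of the 64-bit owner value. A minimal stand-alone sketch of the same transform (user-space C; the key array here merely stands in for the random `fc->scramble_key` set up at connection time) could look like this:

```c
#include <stdint.h>

/* Illustrative only: same XTEA rounds as fuse_lock_owner_id() above. */
static uint64_t scramble_owner_id(uint64_t id, const uint32_t k[4])
{
	uint32_t v0 = (uint32_t) id;
	uint32_t v1 = (uint32_t) (id >> 32);
	uint32_t sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
	}

	return (uint64_t) v0 + ((uint64_t) v1 << 32);
}
```
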
3be5a52b MS |
318 | /* |
319 | * Check if page is under writeback | |
320 | * | |
321 | * This is currently done by walking the list of writepage requests | |
322 | * for the inode, which can be pretty inefficient. | |
323 | */ | |
324 | static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index) | |
325 | { | |
326 | struct fuse_conn *fc = get_fuse_conn(inode); | |
327 | struct fuse_inode *fi = get_fuse_inode(inode); | |
328 | struct fuse_req *req; | |
329 | bool found = false; | |
330 | ||
331 | spin_lock(&fc->lock); | |
332 | list_for_each_entry(req, &fi->writepages, writepages_entry) { | |
333 | pgoff_t curr_index; | |
334 | ||
335 | BUG_ON(req->inode != inode); | |
336 | curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; | |
385b1268 PE |
337 | if (curr_index <= index && |
338 | index < curr_index + req->num_pages) { | |
3be5a52b MS |
339 | found = true; |
340 | break; | |
341 | } | |
342 | } | |
343 | spin_unlock(&fc->lock); | |
344 | ||
345 | return found; | |
346 | } | |
347 | ||
348 | /* | |
349 | * Wait for page writeback to be completed. | |
350 | * | |
351 | * Since fuse doesn't rely on the VM writeback tracking, this has to | |
352 | * use some other means. | |
353 | */ | |
354 | static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) | |
355 | { | |
356 | struct fuse_inode *fi = get_fuse_inode(inode); | |
357 | ||
358 | wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); | |
359 | return 0; | |
360 | } | |
361 | ||
75e1fcc0 | 362 | static int fuse_flush(struct file *file, fl_owner_t id) |
b6aeaded | 363 | { |
6131ffaa | 364 | struct inode *inode = file_inode(file); |
b6aeaded MS |
365 | struct fuse_conn *fc = get_fuse_conn(inode); |
366 | struct fuse_file *ff = file->private_data; | |
367 | struct fuse_req *req; | |
368 | struct fuse_flush_in inarg; | |
369 | int err; | |
370 | ||
248d86e8 MS |
371 | if (is_bad_inode(inode)) |
372 | return -EIO; | |
373 | ||
b6aeaded MS |
374 | if (fc->no_flush) |
375 | return 0; | |
376 | ||
b111c8c0 | 377 | req = fuse_get_req_nofail_nopages(fc, file); |
b6aeaded MS |
378 | memset(&inarg, 0, sizeof(inarg)); |
379 | inarg.fh = ff->fh; | |
9c8ef561 | 380 | inarg.lock_owner = fuse_lock_owner_id(fc, id); |
b6aeaded MS |
381 | req->in.h.opcode = FUSE_FLUSH; |
382 | req->in.h.nodeid = get_node_id(inode); | |
b6aeaded MS |
383 | req->in.numargs = 1; |
384 | req->in.args[0].size = sizeof(inarg); | |
385 | req->in.args[0].value = &inarg; | |
71421259 | 386 | req->force = 1; |
b93f858a | 387 | fuse_request_send(fc, req); |
b6aeaded MS |
388 | err = req->out.h.error; |
389 | fuse_put_request(fc, req); | |
390 | if (err == -ENOSYS) { | |
391 | fc->no_flush = 1; | |
392 | err = 0; | |
393 | } | |
394 | return err; | |
395 | } | |
396 | ||
3be5a52b MS |
397 | /* |
398 | * Wait for all pending writepages on the inode to finish. | |
399 | * | |
400 | * This is currently done by blocking further writes with FUSE_NOWRITE | |
401 | * and waiting for all sent writes to complete. | |
402 | * | |
403 | * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage | |
404 | * could conflict with truncation. | |
405 | */ | |
406 | static void fuse_sync_writes(struct inode *inode) | |
407 | { | |
408 | fuse_set_nowrite(inode); | |
409 | fuse_release_nowrite(inode); | |
410 | } | |
411 | ||
02c24a82 JB |
412 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, |
413 | int datasync, int isdir) | |
b6aeaded | 414 | { |
7ea80859 | 415 | struct inode *inode = file->f_mapping->host; |
b6aeaded MS |
416 | struct fuse_conn *fc = get_fuse_conn(inode); |
417 | struct fuse_file *ff = file->private_data; | |
418 | struct fuse_req *req; | |
419 | struct fuse_fsync_in inarg; | |
420 | int err; | |
421 | ||
248d86e8 MS |
422 | if (is_bad_inode(inode)) |
423 | return -EIO; | |
424 | ||
02c24a82 JB |
425 | err = filemap_write_and_wait_range(inode->i_mapping, start, end); |
426 | if (err) | |
427 | return err; | |
428 | ||
82547981 | 429 | if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) |
b6aeaded MS |
430 | return 0; |
431 | ||
02c24a82 JB |
432 | mutex_lock(&inode->i_mutex); |
433 | ||
3be5a52b MS |
434 | /* |
435 | * Start writeback against all dirty pages of the inode, then | |
436 | * wait for all outstanding writes, before sending the FSYNC | |
437 | * request. | |
438 | */ | |
439 | err = write_inode_now(inode, 0); | |
440 | if (err) | |
02c24a82 | 441 | goto out; |
3be5a52b MS |
442 | |
443 | fuse_sync_writes(inode); | |
444 | ||
b111c8c0 | 445 | req = fuse_get_req_nopages(fc); |
02c24a82 JB |
446 | if (IS_ERR(req)) { |
447 | err = PTR_ERR(req); | |
448 | goto out; | |
449 | } | |
b6aeaded MS |
450 | |
451 | memset(&inarg, 0, sizeof(inarg)); | |
452 | inarg.fh = ff->fh; | |
453 | inarg.fsync_flags = datasync ? 1 : 0; | |
82547981 | 454 | req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; |
b6aeaded | 455 | req->in.h.nodeid = get_node_id(inode); |
b6aeaded MS |
456 | req->in.numargs = 1; |
457 | req->in.args[0].size = sizeof(inarg); | |
458 | req->in.args[0].value = &inarg; | |
b93f858a | 459 | fuse_request_send(fc, req); |
b6aeaded MS |
460 | err = req->out.h.error; |
461 | fuse_put_request(fc, req); | |
462 | if (err == -ENOSYS) { | |
82547981 MS |
463 | if (isdir) |
464 | fc->no_fsyncdir = 1; | |
465 | else | |
466 | fc->no_fsync = 1; | |
b6aeaded MS |
467 | err = 0; |
468 | } | |
02c24a82 JB |
469 | out: |
470 | mutex_unlock(&inode->i_mutex); | |
b6aeaded MS |
471 | return err; |
472 | } | |
473 | ||
02c24a82 JB |
474 | static int fuse_fsync(struct file *file, loff_t start, loff_t end, |
475 | int datasync) | |
82547981 | 476 | { |
02c24a82 | 477 | return fuse_fsync_common(file, start, end, datasync, 0); |
82547981 MS |
478 | } |
479 | ||
2106cb18 MS |
480 | void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, |
481 | size_t count, int opcode) | |
b6aeaded | 482 | { |
5c5c5e51 | 483 | struct fuse_read_in *inarg = &req->misc.read.in; |
a6643094 | 484 | struct fuse_file *ff = file->private_data; |
b6aeaded | 485 | |
361b1eb5 MS |
486 | inarg->fh = ff->fh; |
487 | inarg->offset = pos; | |
488 | inarg->size = count; | |
a6643094 | 489 | inarg->flags = file->f_flags; |
361b1eb5 | 490 | req->in.h.opcode = opcode; |
2106cb18 | 491 | req->in.h.nodeid = ff->nodeid; |
b6aeaded MS |
492 | req->in.numargs = 1; |
493 | req->in.args[0].size = sizeof(struct fuse_read_in); | |
c1aa96a5 | 494 | req->in.args[0].value = inarg; |
b6aeaded MS |
495 | req->out.argvar = 1; |
496 | req->out.numargs = 1; | |
497 | req->out.args[0].size = count; | |
b6aeaded MS |
498 | } |
499 | ||
187c5c36 MP |
500 | static void fuse_release_user_pages(struct fuse_req *req, int write) |
501 | { | |
502 | unsigned i; | |
503 | ||
504 | for (i = 0; i < req->num_pages; i++) { | |
505 | struct page *page = req->pages[i]; | |
506 | if (write) | |
507 | set_page_dirty_lock(page); | |
508 | put_page(page); | |
509 | } | |
510 | } | |
511 | ||
01e9d11a MP |
512 | /** |
513 | * In case of a short read, the caller sets 'pos' to the position of |
514 | * the actual end of the fuse request within the IO request. Otherwise, if bytes_requested |
515 | * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1. | |
516 | * | |
517 | * An example: | |
518 | * User requested DIO read of 64K. It was split into two 32K fuse requests, |
519 | * both submitted asynchronously. The first of them was ACKed by userspace as | |
520 | * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The | |
521 | * second request was ACKed as short, e.g. only 1K was read, resulting in | |
522 | * pos == 33K. | |
523 | * | |
524 | * Thus, when all fuse requests are completed, the minimal non-negative 'pos' | |
525 | * will be equal to the length of the longest contiguous fragment of | |
526 | * transferred data starting from the beginning of IO request. | |
527 | */ | |
528 | static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) | |
529 | { | |
530 | int left; | |
531 | ||
532 | spin_lock(&io->lock); | |
533 | if (err) | |
534 | io->err = io->err ? : err; | |
535 | else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes)) | |
536 | io->bytes = pos; | |
537 | ||
538 | left = --io->reqs; | |
539 | spin_unlock(&io->lock); | |
540 | ||
541 | if (!left) { | |
542 | long res; | |
543 | ||
544 | if (io->err) | |
545 | res = io->err; | |
546 | else if (io->bytes >= 0 && io->write) | |
547 | res = -EIO; | |
548 | else { | |
549 | res = io->bytes < 0 ? io->size : io->bytes; | |
550 | ||
551 | if (!is_sync_kiocb(io->iocb)) { | |
cb5e05d1 | 552 | struct inode *inode = file_inode(io->iocb->ki_filp); |
01e9d11a MP |
553 | struct fuse_conn *fc = get_fuse_conn(inode); |
554 | struct fuse_inode *fi = get_fuse_inode(inode); | |
555 | ||
556 | spin_lock(&fc->lock); | |
557 | fi->attr_version = ++fc->attr_version; | |
558 | spin_unlock(&fc->lock); | |
559 | } | |
560 | } | |
561 | ||
562 | aio_complete(io->iocb, res, 0); | |
563 | kfree(io); | |
564 | } | |
565 | } | |
566 | ||
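
The rule spelled out in the comment above can be read as a simple fold over the per-request 'pos' values: keep the minimum non-negative one, and fall back to the full request size if every sub-request completed in full. A stand-alone sketch of just that aggregation (hypothetical helper, not part of the driver) is:

```c
#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

/*
 * Illustrative only: mirrors how fuse_aio_complete() folds 'pos' into
 * io->bytes.  pos[i] == -1 means sub-request i transferred everything
 * it was asked for; otherwise pos[i] is the offset of its actual end
 * relative to the start of the whole IO request.
 */
static ssize_t contiguous_prefix(const ssize_t *pos, int nreqs, size_t total)
{
	ssize_t bytes = -1;
	int i;

	for (i = 0; i < nreqs; i++) {
		if (pos[i] >= 0 && (bytes < 0 || pos[i] < bytes))
			bytes = pos[i];
	}

	return bytes < 0 ? (ssize_t) total : bytes;
}
```

With the example from the comment (a 64K read split into two 32K requests, the second ACKed short at the 33K mark), pos would be { -1, 33K } and the result 33K.
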
567 | static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req) | |
568 | { | |
569 | struct fuse_io_priv *io = req->io; | |
570 | ssize_t pos = -1; | |
571 | ||
572 | fuse_release_user_pages(req, !io->write); | |
573 | ||
574 | if (io->write) { | |
575 | if (req->misc.write.in.size != req->misc.write.out.size) | |
576 | pos = req->misc.write.in.offset - io->offset + | |
577 | req->misc.write.out.size; | |
578 | } else { | |
579 | if (req->misc.read.in.size != req->out.args[0].size) | |
580 | pos = req->misc.read.in.offset - io->offset + | |
581 | req->out.args[0].size; | |
582 | } | |
583 | ||
584 | fuse_aio_complete(io, req->out.h.error, pos); | |
585 | } | |
586 | ||
587 | static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req, | |
588 | size_t num_bytes, struct fuse_io_priv *io) | |
589 | { | |
590 | spin_lock(&io->lock); | |
591 | io->size += num_bytes; | |
592 | io->reqs++; | |
593 | spin_unlock(&io->lock); | |
594 | ||
595 | req->io = io; | |
596 | req->end = fuse_aio_complete_req; | |
597 | ||
36cf66ed | 598 | __fuse_get_request(req); |
01e9d11a MP |
599 | fuse_request_send_background(fc, req); |
600 | ||
601 | return num_bytes; | |
602 | } | |
603 | ||
36cf66ed | 604 | static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io, |
2106cb18 | 605 | loff_t pos, size_t count, fl_owner_t owner) |
04730fef | 606 | { |
36cf66ed | 607 | struct file *file = io->file; |
2106cb18 MS |
608 | struct fuse_file *ff = file->private_data; |
609 | struct fuse_conn *fc = ff->fc; | |
f3332114 | 610 | |
2106cb18 | 611 | fuse_read_fill(req, file, pos, count, FUSE_READ); |
f3332114 | 612 | if (owner != NULL) { |
5c5c5e51 | 613 | struct fuse_read_in *inarg = &req->misc.read.in; |
f3332114 MS |
614 | |
615 | inarg->read_flags |= FUSE_READ_LOCKOWNER; | |
616 | inarg->lock_owner = fuse_lock_owner_id(fc, owner); | |
617 | } | |
36cf66ed MP |
618 | |
619 | if (io->async) | |
620 | return fuse_async_req_send(fc, req, count, io); | |
621 | ||
b93f858a | 622 | fuse_request_send(fc, req); |
361b1eb5 | 623 | return req->out.args[0].size; |
04730fef MS |
624 | } |
625 | ||
5c5c5e51 MS |
626 | static void fuse_read_update_size(struct inode *inode, loff_t size, |
627 | u64 attr_ver) | |
628 | { | |
629 | struct fuse_conn *fc = get_fuse_conn(inode); | |
630 | struct fuse_inode *fi = get_fuse_inode(inode); | |
631 | ||
632 | spin_lock(&fc->lock); | |
06a7c3c2 MP |
633 | if (attr_ver == fi->attr_version && size < inode->i_size && |
634 | !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) { | |
5c5c5e51 MS |
635 | fi->attr_version = ++fc->attr_version; |
636 | i_size_write(inode, size); | |
637 | } | |
638 | spin_unlock(&fc->lock); | |
639 | } | |
640 | ||
b6aeaded MS |
641 | static int fuse_readpage(struct file *file, struct page *page) |
642 | { | |
36cf66ed | 643 | struct fuse_io_priv io = { .async = 0, .file = file }; |
b6aeaded MS |
644 | struct inode *inode = page->mapping->host; |
645 | struct fuse_conn *fc = get_fuse_conn(inode); | |
248d86e8 | 646 | struct fuse_req *req; |
5c5c5e51 MS |
647 | size_t num_read; |
648 | loff_t pos = page_offset(page); | |
649 | size_t count = PAGE_CACHE_SIZE; | |
650 | u64 attr_ver; | |
248d86e8 MS |
651 | int err; |
652 | ||
653 | err = -EIO; | |
654 | if (is_bad_inode(inode)) | |
655 | goto out; | |
656 | ||
3be5a52b | 657 | /* |
25985edc | 658 | * Page writeback can extend beyond the lifetime of the |
3be5a52b MS |
659 | * page-cache page, so make sure we read a properly synced |
660 | * page. | |
661 | */ | |
662 | fuse_wait_on_page_writeback(inode, page->index); | |
663 | ||
b111c8c0 | 664 | req = fuse_get_req(fc, 1); |
ce1d5a49 MS |
665 | err = PTR_ERR(req); |
666 | if (IS_ERR(req)) | |
b6aeaded MS |
667 | goto out; |
668 | ||
5c5c5e51 MS |
669 | attr_ver = fuse_get_attr_version(fc); |
670 | ||
b6aeaded | 671 | req->out.page_zeroing = 1; |
f4975c67 | 672 | req->out.argpages = 1; |
b6aeaded MS |
673 | req->num_pages = 1; |
674 | req->pages[0] = page; | |
85f40aec | 675 | req->page_descs[0].length = count; |
36cf66ed | 676 | num_read = fuse_send_read(req, &io, pos, count, NULL); |
b6aeaded MS |
677 | err = req->out.h.error; |
678 | fuse_put_request(fc, req); | |
5c5c5e51 MS |
679 | |
680 | if (!err) { | |
681 | /* | |
682 | * Short read means EOF. If file size is larger, truncate it | |
683 | */ | |
684 | if (num_read < count) | |
685 | fuse_read_update_size(inode, pos + num_read, attr_ver); | |
686 | ||
b6aeaded | 687 | SetPageUptodate(page); |
5c5c5e51 MS |
688 | } |
689 | ||
b36c31ba | 690 | fuse_invalidate_attr(inode); /* atime changed */ |
b6aeaded MS |
691 | out: |
692 | unlock_page(page); | |
693 | return err; | |
694 | } | |
695 | ||
c1aa96a5 | 696 | static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) |
db50b96c | 697 | { |
c1aa96a5 | 698 | int i; |
5c5c5e51 MS |
699 | size_t count = req->misc.read.in.size; |
700 | size_t num_read = req->out.args[0].size; | |
ce534fb0 | 701 | struct address_space *mapping = NULL; |
c1aa96a5 | 702 | |
ce534fb0 MS |
703 | for (i = 0; mapping == NULL && i < req->num_pages; i++) |
704 | mapping = req->pages[i]->mapping; | |
5c5c5e51 | 705 | |
ce534fb0 MS |
706 | if (mapping) { |
707 | struct inode *inode = mapping->host; | |
708 | ||
709 | /* | |
710 | * Short read means EOF. If file size is larger, truncate it | |
711 | */ | |
712 | if (!req->out.h.error && num_read < count) { | |
713 | loff_t pos; | |
714 | ||
715 | pos = page_offset(req->pages[0]) + num_read; | |
716 | fuse_read_update_size(inode, pos, | |
717 | req->misc.read.attr_ver); | |
718 | } | |
719 | fuse_invalidate_attr(inode); /* atime changed */ | |
720 | } | |
c1aa96a5 | 721 | |
db50b96c MS |
722 | for (i = 0; i < req->num_pages; i++) { |
723 | struct page *page = req->pages[i]; | |
724 | if (!req->out.h.error) | |
725 | SetPageUptodate(page); | |
c1aa96a5 MS |
726 | else |
727 | SetPageError(page); | |
db50b96c | 728 | unlock_page(page); |
b5dd3285 | 729 | page_cache_release(page); |
db50b96c | 730 | } |
c756e0a4 | 731 | if (req->ff) |
5a18ec17 | 732 | fuse_file_put(req->ff, false); |
c1aa96a5 MS |
733 | } |
734 | ||
2106cb18 | 735 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) |
c1aa96a5 | 736 | { |
2106cb18 MS |
737 | struct fuse_file *ff = file->private_data; |
738 | struct fuse_conn *fc = ff->fc; | |
c1aa96a5 MS |
739 | loff_t pos = page_offset(req->pages[0]); |
740 | size_t count = req->num_pages << PAGE_CACHE_SHIFT; | |
f4975c67 MS |
741 | |
742 | req->out.argpages = 1; | |
c1aa96a5 | 743 | req->out.page_zeroing = 1; |
ce534fb0 | 744 | req->out.page_replace = 1; |
2106cb18 | 745 | fuse_read_fill(req, file, pos, count, FUSE_READ); |
5c5c5e51 | 746 | req->misc.read.attr_ver = fuse_get_attr_version(fc); |
9cd68455 | 747 | if (fc->async_read) { |
c756e0a4 | 748 | req->ff = fuse_file_get(ff); |
9cd68455 | 749 | req->end = fuse_readpages_end; |
b93f858a | 750 | fuse_request_send_background(fc, req); |
9cd68455 | 751 | } else { |
b93f858a | 752 | fuse_request_send(fc, req); |
9cd68455 | 753 | fuse_readpages_end(fc, req); |
e9bb09dd | 754 | fuse_put_request(fc, req); |
9cd68455 | 755 | } |
db50b96c MS |
756 | } |
757 | ||
c756e0a4 | 758 | struct fuse_fill_data { |
db50b96c | 759 | struct fuse_req *req; |
a6643094 | 760 | struct file *file; |
db50b96c | 761 | struct inode *inode; |
f8dbdf81 | 762 | unsigned nr_pages; |
db50b96c MS |
763 | }; |
764 | ||
765 | static int fuse_readpages_fill(void *_data, struct page *page) | |
766 | { | |
c756e0a4 | 767 | struct fuse_fill_data *data = _data; |
db50b96c MS |
768 | struct fuse_req *req = data->req; |
769 | struct inode *inode = data->inode; | |
770 | struct fuse_conn *fc = get_fuse_conn(inode); | |
771 | ||
3be5a52b MS |
772 | fuse_wait_on_page_writeback(inode, page->index); |
773 | ||
db50b96c MS |
774 | if (req->num_pages && |
775 | (req->num_pages == FUSE_MAX_PAGES_PER_REQ || | |
776 | (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || | |
777 | req->pages[req->num_pages - 1]->index + 1 != page->index)) { | |
f8dbdf81 MP |
778 | int nr_alloc = min_t(unsigned, data->nr_pages, |
779 | FUSE_MAX_PAGES_PER_REQ); | |
2106cb18 | 780 | fuse_send_readpages(req, data->file); |
8b41e671 MP |
781 | if (fc->async_read) |
782 | req = fuse_get_req_for_background(fc, nr_alloc); | |
783 | else | |
784 | req = fuse_get_req(fc, nr_alloc); | |
785 | ||
786 | data->req = req; | |
ce1d5a49 | 787 | if (IS_ERR(req)) { |
db50b96c | 788 | unlock_page(page); |
ce1d5a49 | 789 | return PTR_ERR(req); |
db50b96c | 790 | } |
db50b96c | 791 | } |
f8dbdf81 MP |
792 | |
793 | if (WARN_ON(req->num_pages >= req->max_pages)) { | |
794 | fuse_put_request(fc, req); | |
795 | return -EIO; | |
796 | } | |
797 | ||
b5dd3285 | 798 | page_cache_get(page); |
db50b96c | 799 | req->pages[req->num_pages] = page; |
85f40aec | 800 | req->page_descs[req->num_pages].length = PAGE_SIZE; |
1729a16c | 801 | req->num_pages++; |
f8dbdf81 | 802 | data->nr_pages--; |
db50b96c MS |
803 | return 0; |
804 | } | |
805 | ||
806 | static int fuse_readpages(struct file *file, struct address_space *mapping, | |
807 | struct list_head *pages, unsigned nr_pages) | |
808 | { | |
809 | struct inode *inode = mapping->host; | |
810 | struct fuse_conn *fc = get_fuse_conn(inode); | |
c756e0a4 | 811 | struct fuse_fill_data data; |
db50b96c | 812 | int err; |
f8dbdf81 | 813 | int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ); |
248d86e8 | 814 | |
1d7ea732 | 815 | err = -EIO; |
248d86e8 | 816 | if (is_bad_inode(inode)) |
2e990021 | 817 | goto out; |
248d86e8 | 818 | |
a6643094 | 819 | data.file = file; |
db50b96c | 820 | data.inode = inode; |
8b41e671 MP |
821 | if (fc->async_read) |
822 | data.req = fuse_get_req_for_background(fc, nr_alloc); | |
823 | else | |
824 | data.req = fuse_get_req(fc, nr_alloc); | |
f8dbdf81 | 825 | data.nr_pages = nr_pages; |
1d7ea732 | 826 | err = PTR_ERR(data.req); |
ce1d5a49 | 827 | if (IS_ERR(data.req)) |
2e990021 | 828 | goto out; |
db50b96c MS |
829 | |
830 | err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); | |
d3406ffa MS |
831 | if (!err) { |
832 | if (data.req->num_pages) | |
2106cb18 | 833 | fuse_send_readpages(data.req, file); |
d3406ffa MS |
834 | else |
835 | fuse_put_request(fc, data.req); | |
836 | } | |
2e990021 | 837 | out: |
1d7ea732 | 838 | return err; |
db50b96c MS |
839 | } |
840 | ||
bcb4be80 MS |
841 | static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, |
842 | unsigned long nr_segs, loff_t pos) | |
843 | { | |
844 | struct inode *inode = iocb->ki_filp->f_mapping->host; | |
a8894274 | 845 | struct fuse_conn *fc = get_fuse_conn(inode); |
bcb4be80 | 846 | |
a8894274 BF |
847 | /* |
848 | * In auto invalidate mode, always update attributes on read. | |
849 | * Otherwise, only update if we attempt to read past EOF (to ensure | |
850 | * i_size is up to date). | |
851 | */ | |
852 | if (fc->auto_inval_data || | |
853 | (pos + iov_length(iov, nr_segs) > i_size_read(inode))) { | |
bcb4be80 | 854 | int err; |
bcb4be80 MS |
855 | err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL); |
856 | if (err) | |
857 | return err; | |
858 | } | |
859 | ||
860 | return generic_file_aio_read(iocb, iov, nr_segs, pos); | |
861 | } | |
862 | ||
2d698b07 | 863 | static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, |
2106cb18 | 864 | loff_t pos, size_t count) |
b6aeaded | 865 | { |
b25e82e5 MS |
866 | struct fuse_write_in *inarg = &req->misc.write.in; |
867 | struct fuse_write_out *outarg = &req->misc.write.out; | |
b6aeaded | 868 | |
b25e82e5 MS |
869 | inarg->fh = ff->fh; |
870 | inarg->offset = pos; | |
871 | inarg->size = count; | |
b6aeaded | 872 | req->in.h.opcode = FUSE_WRITE; |
2106cb18 | 873 | req->in.h.nodeid = ff->nodeid; |
b6aeaded | 874 | req->in.numargs = 2; |
2106cb18 | 875 | if (ff->fc->minor < 9) |
f3332114 MS |
876 | req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; |
877 | else | |
878 | req->in.args[0].size = sizeof(struct fuse_write_in); | |
b25e82e5 | 879 | req->in.args[0].value = inarg; |
b6aeaded MS |
880 | req->in.args[1].size = count; |
881 | req->out.numargs = 1; | |
882 | req->out.args[0].size = sizeof(struct fuse_write_out); | |
b25e82e5 MS |
883 | req->out.args[0].value = outarg; |
884 | } | |
885 | ||
36cf66ed | 886 | static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io, |
2106cb18 | 887 | loff_t pos, size_t count, fl_owner_t owner) |
b25e82e5 | 888 | { |
36cf66ed | 889 | struct file *file = io->file; |
2106cb18 MS |
890 | struct fuse_file *ff = file->private_data; |
891 | struct fuse_conn *fc = ff->fc; | |
2d698b07 MS |
892 | struct fuse_write_in *inarg = &req->misc.write.in; |
893 | ||
2106cb18 | 894 | fuse_write_fill(req, ff, pos, count); |
2d698b07 | 895 | inarg->flags = file->f_flags; |
f3332114 | 896 | if (owner != NULL) { |
f3332114 MS |
897 | inarg->write_flags |= FUSE_WRITE_LOCKOWNER; |
898 | inarg->lock_owner = fuse_lock_owner_id(fc, owner); | |
899 | } | |
36cf66ed MP |
900 | |
901 | if (io->async) | |
902 | return fuse_async_req_send(fc, req, count, io); | |
903 | ||
b93f858a | 904 | fuse_request_send(fc, req); |
b25e82e5 | 905 | return req->misc.write.out.size; |
b6aeaded MS |
906 | } |
907 | ||
a1d75f25 | 908 | void fuse_write_update_size(struct inode *inode, loff_t pos) |
854512ec MS |
909 | { |
910 | struct fuse_conn *fc = get_fuse_conn(inode); | |
911 | struct fuse_inode *fi = get_fuse_inode(inode); | |
912 | ||
913 | spin_lock(&fc->lock); | |
914 | fi->attr_version = ++fc->attr_version; | |
915 | if (pos > inode->i_size) | |
916 | i_size_write(inode, pos); | |
917 | spin_unlock(&fc->lock); | |
918 | } | |
919 | ||
ea9b9907 NP |
920 | static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, |
921 | struct inode *inode, loff_t pos, | |
922 | size_t count) | |
923 | { | |
924 | size_t res; | |
925 | unsigned offset; | |
926 | unsigned i; | |
36cf66ed | 927 | struct fuse_io_priv io = { .async = 0, .file = file }; |
ea9b9907 NP |
928 | |
929 | for (i = 0; i < req->num_pages; i++) | |
930 | fuse_wait_on_page_writeback(inode, req->pages[i]->index); | |
931 | ||
36cf66ed | 932 | res = fuse_send_write(req, &io, pos, count, NULL); |
ea9b9907 | 933 | |
b2430d75 | 934 | offset = req->page_descs[0].offset; |
ea9b9907 NP |
935 | count = res; |
936 | for (i = 0; i < req->num_pages; i++) { | |
937 | struct page *page = req->pages[i]; | |
938 | ||
939 | if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) | |
940 | SetPageUptodate(page); | |
941 | ||
942 | if (count > PAGE_CACHE_SIZE - offset) | |
943 | count -= PAGE_CACHE_SIZE - offset; | |
944 | else | |
945 | count = 0; | |
946 | offset = 0; | |
947 | ||
948 | unlock_page(page); | |
949 | page_cache_release(page); | |
950 | } | |
951 | ||
952 | return res; | |
953 | } | |
954 | ||
955 | static ssize_t fuse_fill_write_pages(struct fuse_req *req, | |
956 | struct address_space *mapping, | |
957 | struct iov_iter *ii, loff_t pos) | |
958 | { | |
959 | struct fuse_conn *fc = get_fuse_conn(mapping->host); | |
960 | unsigned offset = pos & (PAGE_CACHE_SIZE - 1); | |
961 | size_t count = 0; | |
962 | int err; | |
963 | ||
f4975c67 | 964 | req->in.argpages = 1; |
b2430d75 | 965 | req->page_descs[0].offset = offset; |
ea9b9907 NP |
966 | |
967 | do { | |
968 | size_t tmp; | |
969 | struct page *page; | |
970 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | |
971 | size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, | |
972 | iov_iter_count(ii)); | |
973 | ||
974 | bytes = min_t(size_t, bytes, fc->max_write - count); | |
975 | ||
976 | again: | |
977 | err = -EFAULT; | |
978 | if (iov_iter_fault_in_readable(ii, bytes)) | |
979 | break; | |
980 | ||
981 | err = -ENOMEM; | |
54566b2c | 982 | page = grab_cache_page_write_begin(mapping, index, 0); |
ea9b9907 NP |
983 | if (!page) |
984 | break; | |
985 | ||
931e80e4 | 986 | if (mapping_writably_mapped(mapping)) |
987 | flush_dcache_page(page); | |
988 | ||
ea9b9907 NP |
989 | pagefault_disable(); |
990 | tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); | |
991 | pagefault_enable(); | |
992 | flush_dcache_page(page); | |
993 | ||
478e0841 JW |
994 | mark_page_accessed(page); |
995 | ||
ea9b9907 NP |
996 | if (!tmp) { |
997 | unlock_page(page); | |
998 | page_cache_release(page); | |
999 | bytes = min(bytes, iov_iter_single_seg_count(ii)); | |
1000 | goto again; | |
1001 | } | |
1002 | ||
1003 | err = 0; | |
1004 | req->pages[req->num_pages] = page; | |
85f40aec | 1005 | req->page_descs[req->num_pages].length = tmp; |
ea9b9907 NP |
1006 | req->num_pages++; |
1007 | ||
1008 | iov_iter_advance(ii, tmp); | |
1009 | count += tmp; | |
1010 | pos += tmp; | |
1011 | offset += tmp; | |
1012 | if (offset == PAGE_CACHE_SIZE) | |
1013 | offset = 0; | |
1014 | ||
78bb6cb9 MS |
1015 | if (!fc->big_writes) |
1016 | break; | |
ea9b9907 | 1017 | } while (iov_iter_count(ii) && count < fc->max_write && |
d07f09f5 | 1018 | req->num_pages < req->max_pages && offset == 0); |
ea9b9907 NP |
1019 | |
1020 | return count > 0 ? count : err; | |
1021 | } | |
1022 | ||
d07f09f5 MP |
1023 | static inline unsigned fuse_wr_pages(loff_t pos, size_t len) |
1024 | { | |
1025 | return min_t(unsigned, | |
1026 | ((pos + len - 1) >> PAGE_CACHE_SHIFT) - | |
1027 | (pos >> PAGE_CACHE_SHIFT) + 1, | |
1028 | FUSE_MAX_PAGES_PER_REQ); | |
1029 | } | |
1030 | ||
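
fuse_wr_pages() above simply counts how many page-cache pages the byte range [pos, pos + len) touches and caps the result at FUSE_MAX_PAGES_PER_REQ. A tiny stand-alone check of that arithmetic (assuming 4 KiB pages; the names are illustrative) would be:

```c
#include <assert.h>
#include <stddef.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

/* Same arithmetic as fuse_wr_pages(), without the per-request cap. */
static unsigned pages_spanned(long long pos, size_t len)
{
	return (unsigned)(((pos + (long long)len - 1) >> EXAMPLE_PAGE_SHIFT) -
			  (pos >> EXAMPLE_PAGE_SHIFT) + 1);
}

int main(void)
{
	assert(pages_spanned(0, 4096) == 1);	/* exactly one page */
	assert(pages_spanned(4090, 10) == 2);	/* crosses one boundary */
	assert(pages_spanned(4096, 1) == 1);	/* starts on a boundary */
	return 0;
}
```
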
ea9b9907 NP |
1031 | static ssize_t fuse_perform_write(struct file *file, |
1032 | struct address_space *mapping, | |
1033 | struct iov_iter *ii, loff_t pos) | |
1034 | { | |
1035 | struct inode *inode = mapping->host; | |
1036 | struct fuse_conn *fc = get_fuse_conn(inode); | |
06a7c3c2 | 1037 | struct fuse_inode *fi = get_fuse_inode(inode); |
ea9b9907 NP |
1038 | int err = 0; |
1039 | ssize_t res = 0; | |
1040 | ||
1041 | if (is_bad_inode(inode)) | |
1042 | return -EIO; | |
1043 | ||
06a7c3c2 MP |
1044 | if (inode->i_size < pos + iov_iter_count(ii)) |
1045 | set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); | |
1046 | ||
ea9b9907 NP |
1047 | do { |
1048 | struct fuse_req *req; | |
1049 | ssize_t count; | |
d07f09f5 | 1050 | unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii)); |
ea9b9907 | 1051 | |
d07f09f5 | 1052 | req = fuse_get_req(fc, nr_pages); |
ea9b9907 NP |
1053 | if (IS_ERR(req)) { |
1054 | err = PTR_ERR(req); | |
1055 | break; | |
1056 | } | |
1057 | ||
1058 | count = fuse_fill_write_pages(req, mapping, ii, pos); | |
1059 | if (count <= 0) { | |
1060 | err = count; | |
1061 | } else { | |
1062 | size_t num_written; | |
1063 | ||
1064 | num_written = fuse_send_write_pages(req, file, inode, | |
1065 | pos, count); | |
1066 | err = req->out.h.error; | |
1067 | if (!err) { | |
1068 | res += num_written; | |
1069 | pos += num_written; | |
1070 | ||
1071 | /* break out of the loop on short write */ | |
1072 | if (num_written != count) | |
1073 | err = -EIO; | |
1074 | } | |
1075 | } | |
1076 | fuse_put_request(fc, req); | |
1077 | } while (!err && iov_iter_count(ii)); | |
1078 | ||
1079 | if (res > 0) | |
1080 | fuse_write_update_size(inode, pos); | |
1081 | ||
06a7c3c2 | 1082 | clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); |
ea9b9907 NP |
1083 | fuse_invalidate_attr(inode); |
1084 | ||
1085 | return res > 0 ? res : err; | |
1086 | } | |
1087 | ||
1088 | static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |
1089 | unsigned long nr_segs, loff_t pos) | |
1090 | { | |
1091 | struct file *file = iocb->ki_filp; | |
1092 | struct address_space *mapping = file->f_mapping; | |
1093 | size_t count = 0; | |
4273b793 | 1094 | size_t ocount = 0; |
ea9b9907 | 1095 | ssize_t written = 0; |
4273b793 | 1096 | ssize_t written_buffered = 0; |
ea9b9907 NP |
1097 | struct inode *inode = mapping->host; |
1098 | ssize_t err; | |
1099 | struct iov_iter i; | |
4273b793 | 1100 | loff_t endbyte = 0; |
ea9b9907 NP |
1101 | |
1102 | WARN_ON(iocb->ki_pos != pos); | |
1103 | ||
4273b793 AA |
1104 | ocount = 0; |
1105 | err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); | |
ea9b9907 NP |
1106 | if (err) |
1107 | return err; | |
1108 | ||
4273b793 | 1109 | count = ocount; |
ea9b9907 | 1110 | mutex_lock(&inode->i_mutex); |
ea9b9907 NP |
1111 | |
1112 | /* We can write back this queue in page reclaim */ | |
1113 | current->backing_dev_info = mapping->backing_dev_info; | |
1114 | ||
1115 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | |
1116 | if (err) | |
1117 | goto out; | |
1118 | ||
1119 | if (count == 0) | |
1120 | goto out; | |
1121 | ||
2f1936b8 | 1122 | err = file_remove_suid(file); |
ea9b9907 NP |
1123 | if (err) |
1124 | goto out; | |
1125 | ||
c3b2da31 JB |
1126 | err = file_update_time(file); |
1127 | if (err) | |
1128 | goto out; | |
ea9b9907 | 1129 | |
4273b793 AA |
1130 | if (file->f_flags & O_DIRECT) { |
1131 | written = generic_file_direct_write(iocb, iov, &nr_segs, | |
1132 | pos, &iocb->ki_pos, | |
1133 | count, ocount); | |
1134 | if (written < 0 || written == count) | |
1135 | goto out; | |
1136 | ||
1137 | pos += written; | |
1138 | count -= written; | |
ea9b9907 | 1139 | |
4273b793 AA |
1140 | iov_iter_init(&i, iov, nr_segs, count, written); |
1141 | written_buffered = fuse_perform_write(file, mapping, &i, pos); | |
1142 | if (written_buffered < 0) { | |
1143 | err = written_buffered; | |
1144 | goto out; | |
1145 | } | |
1146 | endbyte = pos + written_buffered - 1; | |
1147 | ||
1148 | err = filemap_write_and_wait_range(file->f_mapping, pos, | |
1149 | endbyte); | |
1150 | if (err) | |
1151 | goto out; | |
1152 | ||
1153 | invalidate_mapping_pages(file->f_mapping, | |
1154 | pos >> PAGE_CACHE_SHIFT, | |
1155 | endbyte >> PAGE_CACHE_SHIFT); | |
1156 | ||
1157 | written += written_buffered; | |
1158 | iocb->ki_pos = pos + written_buffered; | |
1159 | } else { | |
1160 | iov_iter_init(&i, iov, nr_segs, count, 0); | |
1161 | written = fuse_perform_write(file, mapping, &i, pos); | |
1162 | if (written >= 0) | |
1163 | iocb->ki_pos = pos + written; | |
1164 | } | |
ea9b9907 NP |
1165 | out: |
1166 | current->backing_dev_info = NULL; | |
1167 | mutex_unlock(&inode->i_mutex); | |
1168 | ||
1169 | return written ? written : err; | |
1170 | } | |
1171 | ||
7c190c8b MP |
1172 | static inline void fuse_page_descs_length_init(struct fuse_req *req, |
1173 | unsigned index, unsigned nr_pages) | |
85f40aec MP |
1174 | { |
1175 | int i; | |
1176 | ||
7c190c8b | 1177 | for (i = index; i < index + nr_pages; i++) |
85f40aec MP |
1178 | req->page_descs[i].length = PAGE_SIZE - |
1179 | req->page_descs[i].offset; | |
1180 | } | |
1181 | ||
7c190c8b MP |
1182 | static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii) |
1183 | { | |
1184 | return (unsigned long)ii->iov->iov_base + ii->iov_offset; | |
1185 | } | |
1186 | ||
1187 | static inline size_t fuse_get_frag_size(const struct iov_iter *ii, | |
1188 | size_t max_size) | |
1189 | { | |
1190 | return min(iov_iter_single_seg_count(ii), max_size); | |
1191 | } | |
1192 | ||
b98d023a | 1193 | static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, |
ce60a2f1 | 1194 | size_t *nbytesp, int write) |
413ef8cb | 1195 | { |
7c190c8b | 1196 | size_t nbytes = 0; /* # bytes already packed in req */ |
b98d023a | 1197 | |
f4975c67 MS |
1198 | /* Special case for kernel I/O: can copy directly into the buffer */ |
1199 | if (segment_eq(get_fs(), KERNEL_DS)) { | |
7c190c8b MP |
1200 | unsigned long user_addr = fuse_get_user_addr(ii); |
1201 | size_t frag_size = fuse_get_frag_size(ii, *nbytesp); | |
1202 | ||
f4975c67 MS |
1203 | if (write) |
1204 | req->in.args[1].value = (void *) user_addr; | |
1205 | else | |
1206 | req->out.args[0].value = (void *) user_addr; | |
1207 | ||
b98d023a MP |
1208 | iov_iter_advance(ii, frag_size); |
1209 | *nbytesp = frag_size; | |
f4975c67 MS |
1210 | return 0; |
1211 | } | |
413ef8cb | 1212 | |
5565a9d8 | 1213 | while (nbytes < *nbytesp && req->num_pages < req->max_pages) { |
7c190c8b MP |
1214 | unsigned npages; |
1215 | unsigned long user_addr = fuse_get_user_addr(ii); | |
1216 | unsigned offset = user_addr & ~PAGE_MASK; | |
1217 | size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes); | |
1218 | int ret; | |
413ef8cb | 1219 | |
5565a9d8 | 1220 | unsigned n = req->max_pages - req->num_pages; |
7c190c8b MP |
1221 | frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT); |
1222 | ||
1223 | npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
1224 | npages = clamp(npages, 1U, n); | |
1225 | ||
1226 | ret = get_user_pages_fast(user_addr, npages, !write, | |
1227 | &req->pages[req->num_pages]); | |
1228 | if (ret < 0) | |
1229 | return ret; | |
1230 | ||
1231 | npages = ret; | |
1232 | frag_size = min_t(size_t, frag_size, | |
1233 | (npages << PAGE_SHIFT) - offset); | |
1234 | iov_iter_advance(ii, frag_size); | |
1235 | ||
1236 | req->page_descs[req->num_pages].offset = offset; | |
1237 | fuse_page_descs_length_init(req, req->num_pages, npages); | |
1238 | ||
1239 | req->num_pages += npages; | |
1240 | req->page_descs[req->num_pages - 1].length -= | |
1241 | (npages << PAGE_SHIFT) - offset - frag_size; | |
1242 | ||
1243 | nbytes += frag_size; | |
1244 | } | |
f4975c67 MS |
1245 | |
1246 | if (write) | |
1247 | req->in.argpages = 1; | |
1248 | else | |
1249 | req->out.argpages = 1; | |
1250 | ||
7c190c8b | 1251 | *nbytesp = nbytes; |
f4975c67 | 1252 | |
413ef8cb MS |
1253 | return 0; |
1254 | } | |
1255 | ||
5565a9d8 MP |
1256 | static inline int fuse_iter_npages(const struct iov_iter *ii_p) |
1257 | { | |
1258 | struct iov_iter ii = *ii_p; | |
1259 | int npages = 0; | |
1260 | ||
1261 | while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) { | |
1262 | unsigned long user_addr = fuse_get_user_addr(&ii); | |
1263 | unsigned offset = user_addr & ~PAGE_MASK; | |
1264 | size_t frag_size = iov_iter_single_seg_count(&ii); | |
1265 | ||
1266 | npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
1267 | iov_iter_advance(&ii, frag_size); | |
1268 | } | |
1269 | ||
1270 | return min(npages, FUSE_MAX_PAGES_PER_REQ); | |
1271 | } | |
1272 | ||
36cf66ed | 1273 | ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov, |
fb05f41f MS |
1274 | unsigned long nr_segs, size_t count, loff_t *ppos, |
1275 | int write) | |
413ef8cb | 1276 | { |
36cf66ed | 1277 | struct file *file = io->file; |
2106cb18 MS |
1278 | struct fuse_file *ff = file->private_data; |
1279 | struct fuse_conn *fc = ff->fc; | |
413ef8cb MS |
1280 | size_t nmax = write ? fc->max_write : fc->max_read; |
1281 | loff_t pos = *ppos; | |
1282 | ssize_t res = 0; | |
248d86e8 | 1283 | struct fuse_req *req; |
b98d023a MP |
1284 | struct iov_iter ii; |
1285 | ||
1286 | iov_iter_init(&ii, iov, nr_segs, count, 0); | |
248d86e8 | 1287 | |
de82b923 BF |
1288 | if (io->async) |
1289 | req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii)); | |
1290 | else | |
1291 | req = fuse_get_req(fc, fuse_iter_npages(&ii)); | |
ce1d5a49 MS |
1292 | if (IS_ERR(req)) |
1293 | return PTR_ERR(req); | |
413ef8cb MS |
1294 | |
1295 | while (count) { | |
413ef8cb | 1296 | size_t nres; |
2106cb18 | 1297 | fl_owner_t owner = current->files; |
f4975c67 | 1298 | size_t nbytes = min(count, nmax); |
b98d023a | 1299 | int err = fuse_get_user_pages(req, &ii, &nbytes, write); |
413ef8cb MS |
1300 | if (err) { |
1301 | res = err; | |
1302 | break; | |
1303 | } | |
f4975c67 | 1304 | |
413ef8cb | 1305 | if (write) |
36cf66ed | 1306 | nres = fuse_send_write(req, io, pos, nbytes, owner); |
413ef8cb | 1307 | else |
36cf66ed | 1308 | nres = fuse_send_read(req, io, pos, nbytes, owner); |
2106cb18 | 1309 | |
36cf66ed MP |
1310 | if (!io->async) |
1311 | fuse_release_user_pages(req, !write); | |
413ef8cb MS |
1312 | if (req->out.h.error) { |
1313 | if (!res) | |
1314 | res = req->out.h.error; | |
1315 | break; | |
1316 | } else if (nres > nbytes) { | |
1317 | res = -EIO; | |
1318 | break; | |
1319 | } | |
1320 | count -= nres; | |
1321 | res += nres; | |
1322 | pos += nres; | |
413ef8cb MS |
1323 | if (nres != nbytes) |
1324 | break; | |
56cf34ff MS |
1325 | if (count) { |
1326 | fuse_put_request(fc, req); | |
de82b923 BF |
1327 | if (io->async) |
1328 | req = fuse_get_req_for_background(fc, | |
1329 | fuse_iter_npages(&ii)); | |
1330 | else | |
1331 | req = fuse_get_req(fc, fuse_iter_npages(&ii)); | |
56cf34ff MS |
1332 | if (IS_ERR(req)) |
1333 | break; | |
1334 | } | |
413ef8cb | 1335 | } |
f60311d5 AA |
1336 | if (!IS_ERR(req)) |
1337 | fuse_put_request(fc, req); | |
d09cb9d7 | 1338 | if (res > 0) |
413ef8cb | 1339 | *ppos = pos; |
413ef8cb MS |
1340 | |
1341 | return res; | |
1342 | } | |
08cbf542 | 1343 | EXPORT_SYMBOL_GPL(fuse_direct_io); |
413ef8cb | 1344 | |
36cf66ed MP |
1345 | static ssize_t __fuse_direct_read(struct fuse_io_priv *io, |
1346 | const struct iovec *iov, | |
439ee5f0 MP |
1347 | unsigned long nr_segs, loff_t *ppos, |
1348 | size_t count) | |
413ef8cb | 1349 | { |
d09cb9d7 | 1350 | ssize_t res; |
36cf66ed | 1351 | struct file *file = io->file; |
6131ffaa | 1352 | struct inode *inode = file_inode(file); |
d09cb9d7 MS |
1353 | |
1354 | if (is_bad_inode(inode)) | |
1355 | return -EIO; | |
1356 | ||
439ee5f0 | 1357 | res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0); |
d09cb9d7 MS |
1358 | |
1359 | fuse_invalidate_attr(inode); | |
1360 | ||
1361 | return res; | |
413ef8cb MS |
1362 | } |
1363 | ||
b98d023a MP |
1364 | static ssize_t fuse_direct_read(struct file *file, char __user *buf, |
1365 | size_t count, loff_t *ppos) | |
1366 | { | |
36cf66ed | 1367 | struct fuse_io_priv io = { .async = 0, .file = file }; |
fb05f41f | 1368 | struct iovec iov = { .iov_base = buf, .iov_len = count }; |
439ee5f0 | 1369 | return __fuse_direct_read(&io, &iov, 1, ppos, count); |
b98d023a MP |
1370 | } |
1371 | ||
36cf66ed MP |
1372 | static ssize_t __fuse_direct_write(struct fuse_io_priv *io, |
1373 | const struct iovec *iov, | |
b98d023a | 1374 | unsigned long nr_segs, loff_t *ppos) |
413ef8cb | 1375 | { |
36cf66ed | 1376 | struct file *file = io->file; |
6131ffaa | 1377 | struct inode *inode = file_inode(file); |
b98d023a | 1378 | size_t count = iov_length(iov, nr_segs); |
413ef8cb | 1379 | ssize_t res; |
d09cb9d7 | 1380 | |
889f7848 | 1381 | res = generic_write_checks(file, ppos, &count, 0); |
bcba24cc | 1382 | if (!res) |
36cf66ed | 1383 | res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1); |
d09cb9d7 MS |
1384 | |
1385 | fuse_invalidate_attr(inode); | |
1386 | ||
413ef8cb MS |
1387 | return res; |
1388 | } | |
1389 | ||
4273b793 AA |
1390 | static ssize_t fuse_direct_write(struct file *file, const char __user *buf, |
1391 | size_t count, loff_t *ppos) | |
1392 | { | |
fb05f41f | 1393 | struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; |
6131ffaa | 1394 | struct inode *inode = file_inode(file); |
4273b793 | 1395 | ssize_t res; |
36cf66ed | 1396 | struct fuse_io_priv io = { .async = 0, .file = file }; |
4273b793 AA |
1397 | |
1398 | if (is_bad_inode(inode)) | |
1399 | return -EIO; | |
1400 | ||
1401 | /* Don't allow parallel writes to the same file */ | |
1402 | mutex_lock(&inode->i_mutex); | |
36cf66ed | 1403 | res = __fuse_direct_write(&io, &iov, 1, ppos); |
bcba24cc MP |
1404 | if (res > 0) |
1405 | fuse_write_update_size(inode, *ppos); | |
4273b793 AA |
1406 | mutex_unlock(&inode->i_mutex); |
1407 | ||
1408 | return res; | |
1409 | } | |
1410 | ||
3be5a52b | 1411 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) |
b6aeaded | 1412 | { |
385b1268 PE |
1413 | int i; |
1414 | ||
1415 | for (i = 0; i < req->num_pages; i++) | |
1416 | __free_page(req->pages[i]); | |
8b284dc4 MS |
1417 | |
1418 | if (req->ff) | |
1419 | fuse_file_put(req->ff, false); | |
3be5a52b MS |
1420 | } |
1421 | ||
1422 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | |
1423 | { | |
1424 | struct inode *inode = req->inode; | |
1425 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1426 | struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; | |
385b1268 | 1427 | int i; |
3be5a52b MS |
1428 | |
1429 | list_del(&req->writepages_entry); | |
385b1268 PE |
1430 | for (i = 0; i < req->num_pages; i++) { |
1431 | dec_bdi_stat(bdi, BDI_WRITEBACK); | |
1432 | dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP); | |
1433 | bdi_writeout_inc(bdi); | |
1434 | } | |
3be5a52b MS |
1435 | wake_up(&fi->page_waitq); |
1436 | } | |
1437 | ||
1438 | /* Called under fc->lock, may release and reacquire it */ | |
6eaf4782 MP |
1439 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req, |
1440 | loff_t size) | |
b9ca67b2 MS |
1441 | __releases(fc->lock) |
1442 | __acquires(fc->lock) | |
3be5a52b MS |
1443 | { |
1444 | struct fuse_inode *fi = get_fuse_inode(req->inode); | |
3be5a52b | 1445 | struct fuse_write_in *inarg = &req->misc.write.in; |
385b1268 | 1446 | __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; |
3be5a52b MS |
1447 | |
1448 | if (!fc->connected) | |
1449 | goto out_free; | |
1450 | ||
385b1268 PE |
1451 | if (inarg->offset + data_size <= size) { |
1452 | inarg->size = data_size; | |
3be5a52b | 1453 | } else if (inarg->offset < size) { |
385b1268 | 1454 | inarg->size = size - inarg->offset; |
3be5a52b MS |
1455 | } else { |
1456 | /* Got truncated off completely */ | |
1457 | goto out_free; | |
b6aeaded | 1458 | } |
3be5a52b MS |
1459 | |
1460 | req->in.args[1].size = inarg->size; | |
1461 | fi->writectr++; | |
b93f858a | 1462 | fuse_request_send_background_locked(fc, req); |
3be5a52b MS |
1463 | return; |
1464 | ||
1465 | out_free: | |
1466 | fuse_writepage_finish(fc, req); | |
1467 | spin_unlock(&fc->lock); | |
1468 | fuse_writepage_free(fc, req); | |
e9bb09dd | 1469 | fuse_put_request(fc, req); |
3be5a52b | 1470 | spin_lock(&fc->lock); |
b6aeaded MS |
1471 | } |
1472 | ||
3be5a52b MS |
1473 | /* |
1474 | * If fi->writectr is positive (no truncate or fsync going on) send | |
1475 | * all queued writepage requests. | |
1476 | * | |
1477 | * Called with fc->lock | |
1478 | */ | |
1479 | void fuse_flush_writepages(struct inode *inode) | |
b9ca67b2 MS |
1480 | __releases(fc->lock) |
1481 | __acquires(fc->lock) | |
b6aeaded | 1482 | { |
3be5a52b MS |
1483 | struct fuse_conn *fc = get_fuse_conn(inode); |
1484 | struct fuse_inode *fi = get_fuse_inode(inode); | |
6eaf4782 | 1485 | size_t crop = i_size_read(inode); |
3be5a52b MS |
1486 | struct fuse_req *req; |
1487 | ||
1488 | while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { | |
1489 | req = list_entry(fi->queued_writes.next, struct fuse_req, list); | |
1490 | list_del_init(&req->list); | |
6eaf4782 | 1491 | fuse_send_writepage(fc, req, crop); |
3be5a52b MS |
1492 | } |
1493 | } | |
1494 | ||
1495 | static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req) | |
1496 | { | |
1497 | struct inode *inode = req->inode; | |
1498 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1499 | ||
1500 | mapping_set_error(inode->i_mapping, req->out.h.error); | |
1501 | spin_lock(&fc->lock); | |
8b284dc4 | 1502 | while (req->misc.write.next) { |
6eaf4782 MP |
1503 | struct fuse_conn *fc = get_fuse_conn(inode); |
1504 | struct fuse_write_in *inarg = &req->misc.write.in; | |
8b284dc4 MS |
1505 | struct fuse_req *next = req->misc.write.next; |
1506 | req->misc.write.next = next->misc.write.next; | |
1507 | next->misc.write.next = NULL; | |
ce128de6 | 1508 | next->ff = fuse_file_get(req->ff); |
8b284dc4 | 1509 | list_add(&next->writepages_entry, &fi->writepages); |
6eaf4782 MP |
1510 | |
1511 | /* | |
1512 | * Skip fuse_flush_writepages() to make it easy to crop requests | |
1513 | * based on primary request size. | |
1514 | * | |
1515 | * 1st case (trivial): there are no concurrent activities using | |
1516 | * fuse_set/release_nowrite. Then we're on the safe side because | |
1517 | * fuse_flush_writepages() would call fuse_send_writepage() | |
1518 | * anyway. | |
1519 | * | |
1520 | * 2nd case: someone called fuse_set_nowrite and it is waiting | |
1521 | * now for completion of all in-flight requests. This happens | |
1522 | * rarely and no more than once per page, so this should be | |
1523 | * okay. | |
1524 | * | |
1525 | * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle | |
1526 | * of fuse_set_nowrite..fuse_release_nowrite section. The fact | |
1527 | * that fuse_set_nowrite returned implies that all in-flight | |
1528 | * requests were completed along with all of their secondary | |
1529 | * requests. Further primary requests are blocked by negative | |
1530 | * writectr. Hence there cannot be any in-flight requests and | |
1531 | * no invocations of fuse_writepage_end() while we're in | |
1532 | * fuse_set_nowrite..fuse_release_nowrite section. | |
1533 | */ | |
1534 | fuse_send_writepage(fc, next, inarg->offset + inarg->size); | |
8b284dc4 | 1535 | } |
3be5a52b MS |
1536 | fi->writectr--; |
1537 | fuse_writepage_finish(fc, req); | |
1538 | spin_unlock(&fc->lock); | |
1539 | fuse_writepage_free(fc, req); | |
1540 | } | |
1541 | ||
26d614df PE |
1542 | static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc, |
1543 | struct fuse_inode *fi) | |
adcadfa8 | 1544 | { |
72523425 | 1545 | struct fuse_file *ff = NULL; |
adcadfa8 PE |
1546 | |
1547 | spin_lock(&fc->lock); | |
72523425 MS |
1548 | if (!WARN_ON(list_empty(&fi->write_files))) { |
1549 | ff = list_entry(fi->write_files.next, struct fuse_file, | |
1550 | write_entry); | |
1551 | fuse_file_get(ff); | |
1552 | } | |
adcadfa8 PE |
1553 | spin_unlock(&fc->lock); |
1554 | ||
1555 | return ff; | |
1556 | } | |
1557 | ||
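/*
 * Write back a single locked page.  The page contents are copied into a
 * freshly allocated temporary page and a background FUSE write request
 * is queued; writeback on the original page is ended as soon as the
 * request is queued, since further writeback of the same index is
 * serialized against fi->writepages (see fuse_page_is_writeback()).
 */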
3be5a52b MS |
1558 | static int fuse_writepage_locked(struct page *page) |
1559 | { | |
1560 | struct address_space *mapping = page->mapping; | |
1561 | struct inode *inode = mapping->host; | |
1562 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1563 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1564 | struct fuse_req *req; | |
3be5a52b | 1565 | struct page *tmp_page; |
72523425 | 1566 | int error = -ENOMEM; |
3be5a52b MS |
1567 | |
1568 | set_page_writeback(page); | |
1569 | ||
4250c066 | 1570 | req = fuse_request_alloc_nofs(1); |
3be5a52b MS |
1571 | if (!req) |
1572 | goto err; | |
1573 | ||
8b41e671 | 1574 | req->background = 1; /* writeback always goes to bg_queue */ |
3be5a52b MS |
1575 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); |
1576 | if (!tmp_page) | |
1577 | goto err_free; | |
1578 | ||
72523425 | 1579 | error = -EIO; |
26d614df | 1580 | req->ff = fuse_write_file_get(fc, fi); |
72523425 MS |
1581 | if (!req->ff) |
1582 | goto err_free; | |
1583 | ||
adcadfa8 | 1584 | fuse_write_fill(req, req->ff, page_offset(page), 0); |
3be5a52b MS |
1585 | |
1586 | copy_highpage(tmp_page, page); | |
2d698b07 | 1587 | req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; |
8b284dc4 | 1588 | req->misc.write.next = NULL; |
f4975c67 | 1589 | req->in.argpages = 1; |
3be5a52b MS |
1590 | req->num_pages = 1; |
1591 | req->pages[0] = tmp_page; | |
b2430d75 | 1592 | req->page_descs[0].offset = 0; |
85f40aec | 1593 | req->page_descs[0].length = PAGE_SIZE; |
3be5a52b MS |
1594 | req->end = fuse_writepage_end; |
1595 | req->inode = inode; | |
1596 | ||
1597 | inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); | |
1598 | inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); | |
3be5a52b MS |
1599 | |
1600 | spin_lock(&fc->lock); | |
1601 | list_add(&req->writepages_entry, &fi->writepages); | |
1602 | list_add_tail(&req->list, &fi->queued_writes); | |
1603 | fuse_flush_writepages(inode); | |
1604 | spin_unlock(&fc->lock); | |
1605 | ||
4a4ac4eb MP |
1606 | end_page_writeback(page); |
1607 | ||
3be5a52b MS |
1608 | return 0; |
1609 | ||
1610 | err_free: | |
1611 | fuse_request_free(req); | |
1612 | err: | |
1613 | end_page_writeback(page); | |
72523425 | 1614 | return error; |
3be5a52b MS |
1615 | } |
1616 | ||
1617 | static int fuse_writepage(struct page *page, struct writeback_control *wbc) | |
1618 | { | |
1619 | int err; | |
1620 | ||
ff17be08 MS |
1621 | if (fuse_page_is_writeback(page->mapping->host, page->index)) { |
1622 | /* | |
1623 | * ->writepages() should be called for sync() and friends. We | |
1624 | * should only get here on direct reclaim and then we are | |
1625 | * allowed to skip a page which is already in flight | |
1626 | */ | |
1627 | WARN_ON(wbc->sync_mode == WB_SYNC_ALL); | |
1628 | ||
1629 | redirty_page_for_writepage(wbc, page); | |
1630 | return 0; | |
1631 | } | |
1632 | ||
3be5a52b MS |
1633 | err = fuse_writepage_locked(page); |
1634 | unlock_page(page); | |
1635 | ||
1636 | return err; | |
1637 | } | |
1638 | ||
26d614df PE |
1639 | struct fuse_fill_wb_data { |
1640 | struct fuse_req *req; | |
1641 | struct fuse_file *ff; | |
1642 | struct inode *inode; | |
2d033eaa | 1643 | struct page **orig_pages; |
26d614df PE |
1644 | }; |
1645 | ||
1646 | static void fuse_writepages_send(struct fuse_fill_wb_data *data) | |
1647 | { | |
1648 | struct fuse_req *req = data->req; | |
1649 | struct inode *inode = data->inode; | |
1650 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1651 | struct fuse_inode *fi = get_fuse_inode(inode); | |
2d033eaa MP |
1652 | int num_pages = req->num_pages; |
1653 | int i; | |
26d614df PE |
1654 | |
1655 | req->ff = fuse_file_get(data->ff); | |
1656 | spin_lock(&fc->lock); | |
1657 | list_add_tail(&req->list, &fi->queued_writes); | |
1658 | fuse_flush_writepages(inode); | |
1659 | spin_unlock(&fc->lock); | |
2d033eaa MP |
1660 | |
1661 | for (i = 0; i < num_pages; i++) | |
1662 | end_page_writeback(data->orig_pages[i]); | |
26d614df PE |
1663 | } |
1664 | ||
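/*
 * Check whether @page is already covered by a request on fi->writepages.
 * If the most recent request for this page has not yet been picked up by
 * userspace, the fresh contents are simply copied over its temporary
 * page and new_req is freed; otherwise new_req is chained to it through
 * misc.write.next and will be sent by fuse_writepage_end() once the
 * primary request completes.  Returns true if an existing request
 * covered the page, in which case the caller must not queue new_req.
 */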
8b284dc4 MS |
1665 | static bool fuse_writepage_in_flight(struct fuse_req *new_req, |
1666 | struct page *page) | |
1667 | { | |
1668 | struct fuse_conn *fc = get_fuse_conn(new_req->inode); | |
1669 | struct fuse_inode *fi = get_fuse_inode(new_req->inode); | |
1670 | struct fuse_req *tmp; | |
1671 | struct fuse_req *old_req; | |
1672 | bool found = false; | |
1673 | pgoff_t curr_index; | |
1674 | ||
1675 | BUG_ON(new_req->num_pages != 0); | |
1676 | ||
1677 | spin_lock(&fc->lock); | |
1678 | list_del(&new_req->writepages_entry); | |
8b284dc4 MS |
1679 | list_for_each_entry(old_req, &fi->writepages, writepages_entry) { |
1680 | BUG_ON(old_req->inode != new_req->inode); | |
1681 | curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; | |
1682 | if (curr_index <= page->index && | |
1683 | page->index < curr_index + old_req->num_pages) { | |
1684 | found = true; | |
1685 | break; | |
1686 | } | |
1687 | } | |
f6011081 MP |
1688 | if (!found) { |
1689 | list_add(&new_req->writepages_entry, &fi->writepages); | |
8b284dc4 | 1690 | goto out_unlock; |
f6011081 | 1691 | } |
8b284dc4 | 1692 | |
f6011081 | 1693 | new_req->num_pages = 1; |
8b284dc4 MS |
1694 | for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { |
1695 | BUG_ON(tmp->inode != new_req->inode); | |
1696 | curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; | |
1697 | if (tmp->num_pages == 1 && | |
1698 | curr_index == page->index) { | |
1699 | old_req = tmp; | |
1700 | } | |
1701 | } | |
1702 | ||
1703 | if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT || | |
1704 | old_req->state == FUSE_REQ_PENDING)) { | |
41b6e41f MP |
1705 | struct backing_dev_info *bdi = page->mapping->backing_dev_info; |
1706 | ||
8b284dc4 MS |
1707 | copy_highpage(old_req->pages[0], page); |
1708 | spin_unlock(&fc->lock); | |
1709 | ||
41b6e41f | 1710 | dec_bdi_stat(bdi, BDI_WRITEBACK); |
8b284dc4 | 1711 | dec_zone_page_state(page, NR_WRITEBACK_TEMP); |
41b6e41f | 1712 | bdi_writeout_inc(bdi); |
8b284dc4 MS |
1713 | fuse_writepage_free(fc, new_req); |
1714 | fuse_request_free(new_req); | |
1715 | goto out; | |
1716 | } else { | |
1717 | new_req->misc.write.next = old_req->misc.write.next; | |
1718 | old_req->misc.write.next = new_req; | |
1719 | } | |
1720 | out_unlock: | |
1721 | spin_unlock(&fc->lock); | |
1722 | out: | |
1723 | return found; | |
1724 | } | |
1725 | ||
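/*
 * Callback for write_cache_pages().  Each dirty page is copied into a
 * temporary page and appended to the request being assembled in
 * fuse_fill_wb_data.  The request is handed over to userspace (via
 * fuse_writepages_send()) as soon as the next page would not fit:
 * the page is already under writeback, the request is full, adding it
 * would exceed fc->max_write, or the page is not contiguous with the
 * previous one.
 */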
26d614df PE |
1726 | static int fuse_writepages_fill(struct page *page, |
1727 | struct writeback_control *wbc, void *_data) | |
1728 | { | |
1729 | struct fuse_fill_wb_data *data = _data; | |
1730 | struct fuse_req *req = data->req; | |
1731 | struct inode *inode = data->inode; | |
1732 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1733 | struct page *tmp_page; | |
8b284dc4 | 1734 | bool is_writeback; |
26d614df PE |
1735 | int err; |
1736 | ||
1737 | if (!data->ff) { | |
1738 | err = -EIO; | |
1739 | data->ff = fuse_write_file_get(fc, get_fuse_inode(inode)); | |
1740 | if (!data->ff) | |
1741 | goto out_unlock; | |
1742 | } | |
1743 | ||
8b284dc4 MS |
1744 | /* |
1745 | * Being under writeback is unlikely but possible. For example direct | |
1746 | * read to an mmaped fuse file will set the page dirty twice; once when | |
1747 | * the pages are faulted with get_user_pages(), and then after the read | |
1748 | * completed. | |
1749 | */ | |
1750 | is_writeback = fuse_page_is_writeback(inode, page->index); | |
1751 | ||
1752 | if (req && req->num_pages && | |
1753 | (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || | |
1754 | (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || | |
1755 | data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { | |
1756 | fuse_writepages_send(data); | |
1757 | data->req = NULL; | |
26d614df PE |
1758 | } |
1759 | err = -ENOMEM; | |
1760 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); | |
1761 | if (!tmp_page) | |
1762 | goto out_unlock; | |
1763 | ||
1764 | /* | |
1765 | * The page must not be redirtied until the writeout is completed | |
1766 | * (i.e. userspace has sent a reply to the write request). Otherwise | |
1767 | * there could be more than one temporary page instance for each real | |
1768 | * page. | |
1769 | * | |
1770 | * This is ensured by holding the page lock in page_mkwrite() while | |
1771 | * checking fuse_page_is_writeback(). We already hold the page lock | |
1772 | * since clear_page_dirty_for_io() and keep it held until we add the | |
1773 | * request to the fi->writepages list and increment req->num_pages. | |
1774 | * After this fuse_page_is_writeback() will indicate that the page is | |
1775 | * under writeback, so we can release the page lock. | |
1776 | */ | |
1777 | if (data->req == NULL) { | |
1778 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1779 | ||
1780 | err = -ENOMEM; | |
1781 | req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ); | |
1782 | if (!req) { | |
1783 | __free_page(tmp_page); | |
1784 | goto out_unlock; | |
1785 | } | |
1786 | ||
1787 | fuse_write_fill(req, data->ff, page_offset(page), 0); | |
1788 | req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; | |
8b284dc4 | 1789 | req->misc.write.next = NULL; |
26d614df PE |
1790 | req->in.argpages = 1; |
1791 | req->background = 1; | |
1792 | req->num_pages = 0; | |
1793 | req->end = fuse_writepage_end; | |
1794 | req->inode = inode; | |
1795 | ||
1796 | spin_lock(&fc->lock); | |
1797 | list_add(&req->writepages_entry, &fi->writepages); | |
1798 | spin_unlock(&fc->lock); | |
1799 | ||
1800 | data->req = req; | |
1801 | } | |
1802 | set_page_writeback(page); | |
1803 | ||
1804 | copy_highpage(tmp_page, page); | |
1805 | req->pages[req->num_pages] = tmp_page; | |
1806 | req->page_descs[req->num_pages].offset = 0; | |
1807 | req->page_descs[req->num_pages].length = PAGE_SIZE; | |
1808 | ||
1809 | inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK); | |
1810 | inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); | |
8b284dc4 MS |
1811 | |
1812 | err = 0; | |
1813 | if (is_writeback && fuse_writepage_in_flight(req, page)) { | |
1814 | end_page_writeback(page); | |
1815 | data->req = NULL; | |
1816 | goto out_unlock; | |
1817 | } | |
2d033eaa | 1818 | data->orig_pages[req->num_pages] = page; |
26d614df PE |
1819 | |
1820 | /* | |
1821 | * Protected by fc->lock against concurrent access by | |
1822 | * fuse_page_is_writeback(). | |
1823 | */ | |
1824 | spin_lock(&fc->lock); | |
1825 | req->num_pages++; | |
1826 | spin_unlock(&fc->lock); | |
1827 | ||
26d614df PE |
1828 | out_unlock: |
1829 | unlock_page(page); | |
1830 | ||
1831 | return err; | |
1832 | } | |
1833 | ||
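/*
 * ->writepages() for FUSE.  data.orig_pages[] remembers the original
 * page for every slot of the request being assembled, so that writeback
 * on those pages can be ended only after the whole batch has been queued
 * for userspace in fuse_writepages_send().
 */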
1834 | static int fuse_writepages(struct address_space *mapping, | |
1835 | struct writeback_control *wbc) | |
1836 | { | |
1837 | struct inode *inode = mapping->host; | |
1838 | struct fuse_fill_wb_data data; | |
1839 | int err; | |
1840 | ||
1841 | err = -EIO; | |
1842 | if (is_bad_inode(inode)) | |
1843 | goto out; | |
1844 | ||
1845 | data.inode = inode; | |
1846 | data.req = NULL; | |
1847 | data.ff = NULL; | |
1848 | ||
2d033eaa MP |
1849 | err = -ENOMEM; |
1850 | data.orig_pages = kzalloc(sizeof(struct page *) * | |
1851 | FUSE_MAX_PAGES_PER_REQ, | |
1852 | GFP_NOFS); | |
1853 | if (!data.orig_pages) | |
1854 | goto out; | |
1855 | ||
26d614df PE |
1856 | err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); |
1857 | if (data.req) { | |
1858 | /* Ignore errors if we can write at least one page */ | |
1859 | BUG_ON(!data.req->num_pages); | |
1860 | fuse_writepages_send(&data); | |
1861 | err = 0; | |
1862 | } | |
1863 | if (data.ff) | |
1864 | fuse_file_put(data.ff, false); | |
2d033eaa MP |
1865 | |
1866 | kfree(data.orig_pages); | |
26d614df PE |
1867 | out: |
1868 | return err; | |
1869 | } | |
1870 | ||
3be5a52b MS |
1871 | static int fuse_launder_page(struct page *page) |
1872 | { | |
1873 | int err = 0; | |
1874 | if (clear_page_dirty_for_io(page)) { | |
1875 | struct inode *inode = page->mapping->host; | |
1876 | err = fuse_writepage_locked(page); | |
1877 | if (!err) | |
1878 | fuse_wait_on_page_writeback(inode, page->index); | |
1879 | } | |
1880 | return err; | |
1881 | } | |
1882 | ||
1883 | /* | |
1884 | * Write back dirty pages now, because there may not be any suitable | |
1885 | * open files later | |
1886 | */ | |
1887 | static void fuse_vma_close(struct vm_area_struct *vma) | |
1888 | { | |
1889 | filemap_write_and_wait(vma->vm_file->f_mapping); | |
1890 | } | |
1891 | ||
1892 | /* | |
1893 | * Wait for writeback against this page to complete before allowing it | |
1894 | * to be marked dirty again, and hence written back again, possibly | |
1895 | * before the previous writepage completed. | |
1896 | * | |
1897 | * Block here, instead of in ->writepage(), so that the userspace fs | |
1898 | * can only block processes actually operating on the filesystem. | |
1899 | * | |
1900 | * Otherwise unprivileged userspace fs would be able to block | |
1901 | * unrelated: | |
1902 | * | |
1903 | * - page migration | |
1904 | * - sync(2) | |
1905 | * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER | |
1906 | */ | |
c2ec175c | 1907 | static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
3be5a52b | 1908 | { |
c2ec175c | 1909 | struct page *page = vmf->page; |
cca24370 MS |
1910 | struct inode *inode = file_inode(vma->vm_file); |
1911 | ||
1912 | file_update_time(vma->vm_file); | |
1913 | lock_page(page); | |
1914 | if (page->mapping != inode->i_mapping) { | |
1915 | unlock_page(page); | |
1916 | return VM_FAULT_NOPAGE; | |
1917 | } | |
3be5a52b MS |
1918 | |
1919 | fuse_wait_on_page_writeback(inode, page->index); | |
cca24370 | 1920 | return VM_FAULT_LOCKED; |
3be5a52b MS |
1921 | } |
1922 | ||
f0f37e2f | 1923 | static const struct vm_operations_struct fuse_file_vm_ops = { |
3be5a52b MS |
1924 | .close = fuse_vma_close, |
1925 | .fault = filemap_fault, | |
1926 | .page_mkwrite = fuse_page_mkwrite, | |
0b173bc4 | 1927 | .remap_pages = generic_file_remap_pages, |
3be5a52b MS |
1928 | }; |
1929 | ||
1930 | static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) | |
1931 | { | |
1932 | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) { | |
6131ffaa | 1933 | struct inode *inode = file_inode(file); |
3be5a52b MS |
1934 | struct fuse_conn *fc = get_fuse_conn(inode); |
1935 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1936 | struct fuse_file *ff = file->private_data; | |
1937 | /* | |
1938 | * file may be written through mmap, so chain it onto the | |
1939 | * inode's write_files list | |
1940 | */ | |
1941 | spin_lock(&fc->lock); | |
1942 | if (list_empty(&ff->write_entry)) | |
1943 | list_add(&ff->write_entry, &fi->write_files); | |
1944 | spin_unlock(&fc->lock); | |
1945 | } | |
1946 | file_accessed(file); | |
1947 | vma->vm_ops = &fuse_file_vm_ops; | |
b6aeaded MS |
1948 | return 0; |
1949 | } | |
1950 | ||
fc280c96 MS |
1951 | static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma) |
1952 | { | |
1953 | /* Can't provide the coherency needed for MAP_SHARED */ | |
1954 | if (vma->vm_flags & VM_MAYSHARE) | |
1955 | return -ENODEV; | |
1956 | ||
3121bfe7 MS |
1957 | invalidate_inode_pages2(file->f_mapping); |
1958 | ||
fc280c96 MS |
1959 | return generic_file_mmap(file, vma); |
1960 | } | |
1961 | ||
71421259 MS |
1962 | static int convert_fuse_file_lock(const struct fuse_file_lock *ffl, |
1963 | struct file_lock *fl) | |
1964 | { | |
1965 | switch (ffl->type) { | |
1966 | case F_UNLCK: | |
1967 | break; | |
1968 | ||
1969 | case F_RDLCK: | |
1970 | case F_WRLCK: | |
1971 | if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || | |
1972 | ffl->end < ffl->start) | |
1973 | return -EIO; | |
1974 | ||
1975 | fl->fl_start = ffl->start; | |
1976 | fl->fl_end = ffl->end; | |
1977 | fl->fl_pid = ffl->pid; | |
1978 | break; | |
1979 | ||
1980 | default: | |
1981 | return -EIO; | |
1982 | } | |
1983 | fl->fl_type = ffl->type; | |
1984 | return 0; | |
1985 | } | |
1986 | ||
1987 | static void fuse_lk_fill(struct fuse_req *req, struct file *file, | |
a9ff4f87 MS |
1988 | const struct file_lock *fl, int opcode, pid_t pid, |
1989 | int flock) | |
71421259 | 1990 | { |
6131ffaa | 1991 | struct inode *inode = file_inode(file); |
9c8ef561 | 1992 | struct fuse_conn *fc = get_fuse_conn(inode); |
71421259 MS |
1993 | struct fuse_file *ff = file->private_data; |
1994 | struct fuse_lk_in *arg = &req->misc.lk_in; | |
1995 | ||
1996 | arg->fh = ff->fh; | |
9c8ef561 | 1997 | arg->owner = fuse_lock_owner_id(fc, fl->fl_owner); |
71421259 MS |
1998 | arg->lk.start = fl->fl_start; |
1999 | arg->lk.end = fl->fl_end; | |
2000 | arg->lk.type = fl->fl_type; | |
2001 | arg->lk.pid = pid; | |
a9ff4f87 MS |
2002 | if (flock) |
2003 | arg->lk_flags |= FUSE_LK_FLOCK; | |
71421259 MS |
2004 | req->in.h.opcode = opcode; |
2005 | req->in.h.nodeid = get_node_id(inode); | |
2006 | req->in.numargs = 1; | |
2007 | req->in.args[0].size = sizeof(*arg); | |
2008 | req->in.args[0].value = arg; | |
2009 | } | |
2010 | ||
2011 | static int fuse_getlk(struct file *file, struct file_lock *fl) | |
2012 | { | |
6131ffaa | 2013 | struct inode *inode = file_inode(file); |
71421259 MS |
2014 | struct fuse_conn *fc = get_fuse_conn(inode); |
2015 | struct fuse_req *req; | |
2016 | struct fuse_lk_out outarg; | |
2017 | int err; | |
2018 | ||
b111c8c0 | 2019 | req = fuse_get_req_nopages(fc); |
71421259 MS |
2020 | if (IS_ERR(req)) |
2021 | return PTR_ERR(req); | |
2022 | ||
a9ff4f87 | 2023 | fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0); |
71421259 MS |
2024 | req->out.numargs = 1; |
2025 | req->out.args[0].size = sizeof(outarg); | |
2026 | req->out.args[0].value = &outarg; | |
b93f858a | 2027 | fuse_request_send(fc, req); |
71421259 MS |
2028 | err = req->out.h.error; |
2029 | fuse_put_request(fc, req); | |
2030 | if (!err) | |
2031 | err = convert_fuse_file_lock(&outarg.lk, fl); | |
2032 | ||
2033 | return err; | |
2034 | } | |
2035 | ||
a9ff4f87 | 2036 | static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) |
71421259 | 2037 | { |
6131ffaa | 2038 | struct inode *inode = file_inode(file); |
71421259 MS |
2039 | struct fuse_conn *fc = get_fuse_conn(inode); |
2040 | struct fuse_req *req; | |
2041 | int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; | |
2042 | pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0; | |
2043 | int err; | |
2044 | ||
8fb47a4f | 2045 | if (fl->fl_lmops && fl->fl_lmops->lm_grant) { |
48e90761 MS |
2046 | /* NLM needs asynchronous locks, which we don't support yet */ |
2047 | return -ENOLCK; | |
2048 | } | |
2049 | ||
71421259 MS |
2050 | /* Unlock on close is handled by the flush method */ |
2051 | if (fl->fl_flags & FL_CLOSE) | |
2052 | return 0; | |
2053 | ||
b111c8c0 | 2054 | req = fuse_get_req_nopages(fc); |
71421259 MS |
2055 | if (IS_ERR(req)) |
2056 | return PTR_ERR(req); | |
2057 | ||
a9ff4f87 | 2058 | fuse_lk_fill(req, file, fl, opcode, pid, flock); |
b93f858a | 2059 | fuse_request_send(fc, req); |
71421259 | 2060 | err = req->out.h.error; |
a4d27e75 MS |
2061 | /* locking is restartable */ |
2062 | if (err == -EINTR) | |
2063 | err = -ERESTARTSYS; | |
71421259 MS |
2064 | fuse_put_request(fc, req); |
2065 | return err; | |
2066 | } | |
2067 | ||
2068 | static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) | |
2069 | { | |
6131ffaa | 2070 | struct inode *inode = file_inode(file); |
71421259 MS |
2071 | struct fuse_conn *fc = get_fuse_conn(inode); |
2072 | int err; | |
2073 | ||
48e90761 MS |
2074 | if (cmd == F_CANCELLK) { |
2075 | err = 0; | |
2076 | } else if (cmd == F_GETLK) { | |
71421259 | 2077 | if (fc->no_lock) { |
9d6a8c5c | 2078 | posix_test_lock(file, fl); |
71421259 MS |
2079 | err = 0; |
2080 | } else | |
2081 | err = fuse_getlk(file, fl); | |
2082 | } else { | |
2083 | if (fc->no_lock) | |
48e90761 | 2084 | err = posix_lock_file(file, fl, NULL); |
71421259 | 2085 | else |
a9ff4f87 | 2086 | err = fuse_setlk(file, fl, 0); |
71421259 MS |
2087 | } |
2088 | return err; | |
2089 | } | |
2090 | ||
a9ff4f87 MS |
2091 | static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) |
2092 | { | |
6131ffaa | 2093 | struct inode *inode = file_inode(file); |
a9ff4f87 MS |
2094 | struct fuse_conn *fc = get_fuse_conn(inode); |
2095 | int err; | |
2096 | ||
37fb3a30 | 2097 | if (fc->no_flock) { |
a9ff4f87 MS |
2098 | err = flock_lock_file_wait(file, fl); |
2099 | } else { | |
37fb3a30 MS |
2100 | struct fuse_file *ff = file->private_data; |
2101 | ||
a9ff4f87 MS |
2102 | /* emulate flock with POSIX locks */ |
2103 | fl->fl_owner = (fl_owner_t) file; | |
37fb3a30 | 2104 | ff->flock = true; |
a9ff4f87 MS |
2105 | err = fuse_setlk(file, fl, 1); |
2106 | } | |
2107 | ||
2108 | return err; | |
2109 | } | |
2110 | ||
b2d2272f MS |
2111 | static sector_t fuse_bmap(struct address_space *mapping, sector_t block) |
2112 | { | |
2113 | struct inode *inode = mapping->host; | |
2114 | struct fuse_conn *fc = get_fuse_conn(inode); | |
2115 | struct fuse_req *req; | |
2116 | struct fuse_bmap_in inarg; | |
2117 | struct fuse_bmap_out outarg; | |
2118 | int err; | |
2119 | ||
2120 | if (!inode->i_sb->s_bdev || fc->no_bmap) | |
2121 | return 0; | |
2122 | ||
b111c8c0 | 2123 | req = fuse_get_req_nopages(fc); |
b2d2272f MS |
2124 | if (IS_ERR(req)) |
2125 | return 0; | |
2126 | ||
2127 | memset(&inarg, 0, sizeof(inarg)); | |
2128 | inarg.block = block; | |
2129 | inarg.blocksize = inode->i_sb->s_blocksize; | |
2130 | req->in.h.opcode = FUSE_BMAP; | |
2131 | req->in.h.nodeid = get_node_id(inode); | |
2132 | req->in.numargs = 1; | |
2133 | req->in.args[0].size = sizeof(inarg); | |
2134 | req->in.args[0].value = &inarg; | |
2135 | req->out.numargs = 1; | |
2136 | req->out.args[0].size = sizeof(outarg); | |
2137 | req->out.args[0].value = &outarg; | |
b93f858a | 2138 | fuse_request_send(fc, req); |
b2d2272f MS |
2139 | err = req->out.h.error; |
2140 | fuse_put_request(fc, req); | |
2141 | if (err == -ENOSYS) | |
2142 | fc->no_bmap = 1; | |
2143 | ||
2144 | return err ? 0 : outarg.block; | |
2145 | } | |
2146 | ||
965c8e59 | 2147 | static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) |
5559b8f4 MS |
2148 | { |
2149 | loff_t retval; | |
6131ffaa | 2150 | struct inode *inode = file_inode(file); |
5559b8f4 | 2151 | |
c07c3d19 | 2152 | /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ |
965c8e59 AM |
2153 | if (whence == SEEK_CUR || whence == SEEK_SET) |
2154 | return generic_file_llseek(file, offset, whence); | |
06222e49 | 2155 | |
c07c3d19 MS |
2156 | mutex_lock(&inode->i_mutex); |
2157 | retval = fuse_update_attributes(inode, NULL, file, NULL); | |
2158 | if (!retval) | |
965c8e59 | 2159 | retval = generic_file_llseek(file, offset, whence); |
5559b8f4 | 2160 | mutex_unlock(&inode->i_mutex); |
c07c3d19 | 2161 | |
5559b8f4 MS |
2162 | return retval; |
2163 | } | |
2164 | ||
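/*
 * Copy ioctl data between the request's page array and the caller's
 * userspace iovecs; @to_user selects the direction.  Used both to fill
 * the input pages before the request is sent and to copy the result
 * back afterwards.
 */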
59efec7b TH |
2165 | static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, |
2166 | unsigned int nr_segs, size_t bytes, bool to_user) | |
2167 | { | |
2168 | struct iov_iter ii; | |
2169 | int page_idx = 0; | |
2170 | ||
2171 | if (!bytes) | |
2172 | return 0; | |
2173 | ||
2174 | iov_iter_init(&ii, iov, nr_segs, bytes, 0); | |
2175 | ||
2176 | while (iov_iter_count(&ii)) { | |
2177 | struct page *page = pages[page_idx++]; | |
2178 | size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); | |
4aa0edd2 | 2179 | void *kaddr; |
59efec7b | 2180 | |
4aa0edd2 | 2181 | kaddr = kmap(page); |
59efec7b TH |
2182 | |
2183 | while (todo) { | |
2184 | char __user *uaddr = ii.iov->iov_base + ii.iov_offset; | |
2185 | size_t iov_len = ii.iov->iov_len - ii.iov_offset; | |
2186 | size_t copy = min(todo, iov_len); | |
2187 | size_t left; | |
2188 | ||
2189 | if (!to_user) | |
2190 | left = copy_from_user(kaddr, uaddr, copy); | |
2191 | else | |
2192 | left = copy_to_user(uaddr, kaddr, copy); | |
2193 | ||
2194 | if (unlikely(left)) | |
2195 | return -EFAULT; | |
2196 | ||
2197 | iov_iter_advance(&ii, copy); | |
2198 | todo -= copy; | |
2199 | kaddr += copy; | |
2200 | } | |
2201 | ||
0bd87182 | 2202 | kunmap(page); |
59efec7b TH |
2203 | } |
2204 | ||
2205 | return 0; | |
2206 | } | |
2207 | ||
d9d318d3 MS |
2208 | /* |
2209 | * CUSE servers compiled on 32bit broke on 64bit kernels because the | |
2210 | * ABI was defined to be 'struct iovec' which is different on 32bit | |
2211 | * and 64bit. Fortunately we can determine which structure the server | |
2212 | * used from the size of the reply. | |
2213 | */ | |
1baa26b2 MS |
2214 | static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, |
2215 | size_t transferred, unsigned count, | |
2216 | bool is_compat) | |
d9d318d3 MS |
2217 | { |
2218 | #ifdef CONFIG_COMPAT | |
2219 | if (count * sizeof(struct compat_iovec) == transferred) { | |
2220 | struct compat_iovec *ciov = src; | |
2221 | unsigned i; | |
2222 | ||
2223 | /* | |
2224 | * With this interface a 32bit server cannot support | |
2225 | * non-compat (i.e. ones coming from 64bit apps) ioctl | |
2226 | * requests | |
2227 | */ | |
2228 | if (!is_compat) | |
2229 | return -EINVAL; | |
2230 | ||
2231 | for (i = 0; i < count; i++) { | |
2232 | dst[i].iov_base = compat_ptr(ciov[i].iov_base); | |
2233 | dst[i].iov_len = ciov[i].iov_len; | |
2234 | } | |
2235 | return 0; | |
2236 | } | |
2237 | #endif | |
2238 | ||
2239 | if (count * sizeof(struct iovec) != transferred) | |
2240 | return -EIO; | |
2241 | ||
2242 | memcpy(dst, src, transferred); | |
2243 | return 0; | |
2244 | } | |
2245 | ||
7572777e MS |
2246 | /* Make sure iov_length() won't overflow */ |
2247 | static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) | |
2248 | { | |
2249 | size_t n; | |
2250 | u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; | |
2251 | ||
fb6ccff6 | 2252 | for (n = 0; n < count; n++, iov++) { |
7572777e MS |
2253 | if (iov->iov_len > (size_t) max) |
2254 | return -ENOMEM; | |
2255 | max -= iov->iov_len; | |
2256 | } | |
2257 | return 0; | |
2258 | } | |
2259 | ||
1baa26b2 MS |
2260 | static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, |
2261 | void *src, size_t transferred, unsigned count, | |
2262 | bool is_compat) | |
2263 | { | |
2264 | unsigned i; | |
2265 | struct fuse_ioctl_iovec *fiov = src; | |
2266 | ||
2267 | if (fc->minor < 16) { | |
2268 | return fuse_copy_ioctl_iovec_old(dst, src, transferred, | |
2269 | count, is_compat); | |
2270 | } | |
2271 | ||
2272 | if (count * sizeof(struct fuse_ioctl_iovec) != transferred) | |
2273 | return -EIO; | |
2274 | ||
2275 | for (i = 0; i < count; i++) { | |
2276 | /* Did the server supply an inappropriate value? */ | |
2277 | if (fiov[i].base != (unsigned long) fiov[i].base || | |
2278 | fiov[i].len != (unsigned long) fiov[i].len) | |
2279 | return -EIO; | |
2280 | ||
2281 | dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; | |
2282 | dst[i].iov_len = (size_t) fiov[i].len; | |
2283 | ||
2284 | #ifdef CONFIG_COMPAT | |
2285 | if (is_compat && | |
2286 | (ptr_to_compat(dst[i].iov_base) != fiov[i].base || | |
2287 | (compat_size_t) dst[i].iov_len != fiov[i].len)) | |
2288 | return -EIO; | |
2289 | #endif | |
2290 | } | |
2291 | ||
2292 | return 0; | |
2293 | } | |
2294 | ||
2295 | ||
59efec7b TH |
2296 | /* |
2297 | * For ioctls, there is no generic way to determine how much memory | |
2298 | * needs to be read and/or written. Furthermore, ioctls are allowed | |
2299 | * to dereference the passed pointer, so the parameter requires deep | |
2300 | * copying but FUSE has no idea whatsoever about what to copy in or | |
2301 | * out. | |
2302 | * | |
2303 | * This is solved by allowing FUSE server to retry ioctl with | |
2304 | * necessary in/out iovecs. Let's assume the ioctl implementation | |
2305 | * needs to read in the following structure. | |
2306 | * | |
2307 | * struct a { | |
2308 | * char *buf; | |
2309 | * size_t buflen; | |
2310 | * } | |
2311 | * | |
2312 | * On the first callout to FUSE server, inarg->in_size and | |
2313 | * inarg->out_size will be zero; then, the server completes the ioctl | |
2314 | * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and | |
2315 | * the actual iov array to | |
2316 | * | |
2317 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } | |
2318 | * | |
2319 | * which tells FUSE to copy in the requested area and retry the ioctl. | |
2320 | * On the second round, the server has access to the structure and | |
2321 | * from that it can tell what to look for next, so on this invocation, | |
2322 | * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to | |
2323 | * | |
2324 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, | |
2325 | * { .iov_base = a.buf, .iov_len = a.buflen } } | |
2326 | * | |
2327 | * FUSE will copy both struct a and the pointed buffer from the | |
2328 | * process doing the ioctl and retry ioctl with both struct a and the | |
2329 | * buffer. | |
2330 | * | |
2331 | * This time, FUSE server has everything it needs and completes ioctl | |
2332 | * without FUSE_IOCTL_RETRY which finishes the ioctl call. | |
2333 | * | |
2334 | * Copying data out works the same way. | |
2335 | * | |
2336 | * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel | |
2337 | * automatically initializes in and out iovs by decoding @cmd with | |
2338 | * _IOC_* macros and the server is not allowed to request RETRY. This | |
2339 | * limits ioctl data transfers to well-formed ioctls and is the forced | |
2340 | * behavior for all FUSE servers. | |
2341 | */ | |
08cbf542 TH |
2342 | long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, |
2343 | unsigned int flags) | |
59efec7b | 2344 | { |
59efec7b | 2345 | struct fuse_file *ff = file->private_data; |
d36f2487 | 2346 | struct fuse_conn *fc = ff->fc; |
59efec7b TH |
2347 | struct fuse_ioctl_in inarg = { |
2348 | .fh = ff->fh, | |
2349 | .cmd = cmd, | |
2350 | .arg = arg, | |
2351 | .flags = flags | |
2352 | }; | |
2353 | struct fuse_ioctl_out outarg; | |
2354 | struct fuse_req *req = NULL; | |
2355 | struct page **pages = NULL; | |
8ac83505 | 2356 | struct iovec *iov_page = NULL; |
59efec7b TH |
2357 | struct iovec *in_iov = NULL, *out_iov = NULL; |
2358 | unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; | |
2359 | size_t in_size, out_size, transferred; | |
2360 | int err; | |
2361 | ||
1baa26b2 MS |
2362 | #if BITS_PER_LONG == 32 |
2363 | inarg.flags |= FUSE_IOCTL_32BIT; | |
2364 | #else | |
2365 | if (flags & FUSE_IOCTL_COMPAT) | |
2366 | inarg.flags |= FUSE_IOCTL_32BIT; | |
2367 | #endif | |
2368 | ||
59efec7b | 2369 | /* assume all the iovs returned by client always fits in a page */ |
1baa26b2 | 2370 | BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); |
59efec7b | 2371 | |
59efec7b | 2372 | err = -ENOMEM; |
c411cc88 | 2373 | pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); |
8ac83505 | 2374 | iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); |
59efec7b TH |
2375 | if (!pages || !iov_page) |
2376 | goto out; | |
2377 | ||
2378 | /* | |
2379 | * If restricted, initialize IO parameters as encoded in @cmd. | |
2380 | * RETRY from server is not allowed. | |
2381 | */ | |
2382 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { | |
8ac83505 | 2383 | struct iovec *iov = iov_page; |
59efec7b | 2384 | |
c9f0523d | 2385 | iov->iov_base = (void __user *)arg; |
59efec7b TH |
2386 | iov->iov_len = _IOC_SIZE(cmd); |
2387 | ||
2388 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | |
2389 | in_iov = iov; | |
2390 | in_iovs = 1; | |
2391 | } | |
2392 | ||
2393 | if (_IOC_DIR(cmd) & _IOC_READ) { | |
2394 | out_iov = iov; | |
2395 | out_iovs = 1; | |
2396 | } | |
2397 | } | |
2398 | ||
2399 | retry: | |
2400 | inarg.in_size = in_size = iov_length(in_iov, in_iovs); | |
2401 | inarg.out_size = out_size = iov_length(out_iov, out_iovs); | |
2402 | ||
2403 | /* | |
2404 | * Out data can be used either for actual out data or iovs, | |
2405 | * make sure there always is at least one page. | |
2406 | */ | |
2407 | out_size = max_t(size_t, out_size, PAGE_SIZE); | |
2408 | max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); | |
2409 | ||
2410 | /* make sure there are enough buffer pages and init request with them */ | |
2411 | err = -ENOMEM; | |
2412 | if (max_pages > FUSE_MAX_PAGES_PER_REQ) | |
2413 | goto out; | |
2414 | while (num_pages < max_pages) { | |
2415 | pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | |
2416 | if (!pages[num_pages]) | |
2417 | goto out; | |
2418 | num_pages++; | |
2419 | } | |
2420 | ||
54b96670 | 2421 | req = fuse_get_req(fc, num_pages); |
59efec7b TH |
2422 | if (IS_ERR(req)) { |
2423 | err = PTR_ERR(req); | |
2424 | req = NULL; | |
2425 | goto out; | |
2426 | } | |
2427 | memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); | |
2428 | req->num_pages = num_pages; | |
7c190c8b | 2429 | fuse_page_descs_length_init(req, 0, req->num_pages); |
59efec7b TH |
2430 | |
2431 | /* okay, let's send it to the client */ | |
2432 | req->in.h.opcode = FUSE_IOCTL; | |
d36f2487 | 2433 | req->in.h.nodeid = ff->nodeid; |
59efec7b TH |
2434 | req->in.numargs = 1; |
2435 | req->in.args[0].size = sizeof(inarg); | |
2436 | req->in.args[0].value = &inarg; | |
2437 | if (in_size) { | |
2438 | req->in.numargs++; | |
2439 | req->in.args[1].size = in_size; | |
2440 | req->in.argpages = 1; | |
2441 | ||
2442 | err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, | |
2443 | false); | |
2444 | if (err) | |
2445 | goto out; | |
2446 | } | |
2447 | ||
2448 | req->out.numargs = 2; | |
2449 | req->out.args[0].size = sizeof(outarg); | |
2450 | req->out.args[0].value = &outarg; | |
2451 | req->out.args[1].size = out_size; | |
2452 | req->out.argpages = 1; | |
2453 | req->out.argvar = 1; | |
2454 | ||
b93f858a | 2455 | fuse_request_send(fc, req); |
59efec7b TH |
2456 | err = req->out.h.error; |
2457 | transferred = req->out.args[1].size; | |
2458 | fuse_put_request(fc, req); | |
2459 | req = NULL; | |
2460 | if (err) | |
2461 | goto out; | |
2462 | ||
2463 | /* did it ask for retry? */ | |
2464 | if (outarg.flags & FUSE_IOCTL_RETRY) { | |
8ac83505 | 2465 | void *vaddr; |
59efec7b TH |
2466 | |
2467 | /* no retry if in restricted mode */ | |
2468 | err = -EIO; | |
2469 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) | |
2470 | goto out; | |
2471 | ||
2472 | in_iovs = outarg.in_iovs; | |
2473 | out_iovs = outarg.out_iovs; | |
2474 | ||
2475 | /* | |
2476 | * Make sure things are in boundary, separate checks | |
2477 | * are to protect against overflow. | |
2478 | */ | |
2479 | err = -ENOMEM; | |
2480 | if (in_iovs > FUSE_IOCTL_MAX_IOV || | |
2481 | out_iovs > FUSE_IOCTL_MAX_IOV || | |
2482 | in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) | |
2483 | goto out; | |
2484 | ||
2408f6ef | 2485 | vaddr = kmap_atomic(pages[0]); |
1baa26b2 | 2486 | err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, |
d9d318d3 MS |
2487 | transferred, in_iovs + out_iovs, |
2488 | (flags & FUSE_IOCTL_COMPAT) != 0); | |
2408f6ef | 2489 | kunmap_atomic(vaddr); |
d9d318d3 MS |
2490 | if (err) |
2491 | goto out; | |
59efec7b | 2492 | |
8ac83505 | 2493 | in_iov = iov_page; |
59efec7b TH |
2494 | out_iov = in_iov + in_iovs; |
2495 | ||
7572777e MS |
2496 | err = fuse_verify_ioctl_iov(in_iov, in_iovs); |
2497 | if (err) | |
2498 | goto out; | |
2499 | ||
2500 | err = fuse_verify_ioctl_iov(out_iov, out_iovs); | |
2501 | if (err) | |
2502 | goto out; | |
2503 | ||
59efec7b TH |
2504 | goto retry; |
2505 | } | |
2506 | ||
2507 | err = -EIO; | |
2508 | if (transferred > inarg.out_size) | |
2509 | goto out; | |
2510 | ||
2511 | err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); | |
2512 | out: | |
2513 | if (req) | |
2514 | fuse_put_request(fc, req); | |
8ac83505 | 2515 | free_page((unsigned long) iov_page); |
59efec7b TH |
2516 | while (num_pages) |
2517 | __free_page(pages[--num_pages]); | |
2518 | kfree(pages); | |
2519 | ||
2520 | return err ? err : outarg.result; | |
2521 | } | |
08cbf542 | 2522 | EXPORT_SYMBOL_GPL(fuse_do_ioctl); |
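/*
 * Purely illustrative sketch of the server side of the retry protocol
 * documented above, for the hypothetical "struct a" ioctl (the struct
 * and the raw reply framing are assumptions for illustration only; a
 * real server would normally go through a FUSE library):
 *
 *	struct fuse_ioctl_out out = {
 *		.flags   = FUSE_IOCTL_RETRY,
 *		.in_iovs = 1,
 *	};
 *	struct fuse_ioctl_iovec iov = {
 *		.base = inarg->arg,
 *		.len  = sizeof(struct a),
 *	};
 *
 * where inarg->arg is the ioctl argument as received in fuse_ioctl_in.
 * Replying with these two structures makes the kernel copy struct a from
 * the calling process and resend FUSE_IOCTL with that data attached; a
 * second retry can then ask for a.buf/a.buflen the same way, and a final
 * reply without FUSE_IOCTL_RETRY completes the ioctl.
 */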
59efec7b | 2523 | |
b18da0c5 MS |
2524 | long fuse_ioctl_common(struct file *file, unsigned int cmd, |
2525 | unsigned long arg, unsigned int flags) | |
d36f2487 | 2526 | { |
6131ffaa | 2527 | struct inode *inode = file_inode(file); |
d36f2487 MS |
2528 | struct fuse_conn *fc = get_fuse_conn(inode); |
2529 | ||
c2132c1b | 2530 | if (!fuse_allow_current_process(fc)) |
d36f2487 MS |
2531 | return -EACCES; |
2532 | ||
2533 | if (is_bad_inode(inode)) | |
2534 | return -EIO; | |
2535 | ||
2536 | return fuse_do_ioctl(file, cmd, arg, flags); | |
2537 | } | |
2538 | ||
59efec7b TH |
2539 | static long fuse_file_ioctl(struct file *file, unsigned int cmd, |
2540 | unsigned long arg) | |
2541 | { | |
b18da0c5 | 2542 | return fuse_ioctl_common(file, cmd, arg, 0); |
59efec7b TH |
2543 | } |
2544 | ||
2545 | static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, | |
2546 | unsigned long arg) | |
2547 | { | |
b18da0c5 | 2548 | return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); |
59efec7b TH |
2549 | } |
2550 | ||
95668a69 TH |
2551 | /* |
2552 | * All files which have been polled are linked to RB tree | |
2553 | * fuse_conn->polled_files which is indexed by kh. Walk the tree and | |
2554 | * find the matching one. | |
2555 | */ | |
2556 | static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, | |
2557 | struct rb_node **parent_out) | |
2558 | { | |
2559 | struct rb_node **link = &fc->polled_files.rb_node; | |
2560 | struct rb_node *last = NULL; | |
2561 | ||
2562 | while (*link) { | |
2563 | struct fuse_file *ff; | |
2564 | ||
2565 | last = *link; | |
2566 | ff = rb_entry(last, struct fuse_file, polled_node); | |
2567 | ||
2568 | if (kh < ff->kh) | |
2569 | link = &last->rb_left; | |
2570 | else if (kh > ff->kh) | |
2571 | link = &last->rb_right; | |
2572 | else | |
2573 | return link; | |
2574 | } | |
2575 | ||
2576 | if (parent_out) | |
2577 | *parent_out = last; | |
2578 | return link; | |
2579 | } | |
2580 | ||
2581 | /* | |
2582 | * The file is about to be polled. Make sure it's on the polled_files | |
2583 | * RB tree. Note that files once added to the polled_files tree are | |
2584 | * not removed before the file is released. This is because a file | |
2585 | * polled once is likely to be polled again. | |
2586 | */ | |
2587 | static void fuse_register_polled_file(struct fuse_conn *fc, | |
2588 | struct fuse_file *ff) | |
2589 | { | |
2590 | spin_lock(&fc->lock); | |
2591 | if (RB_EMPTY_NODE(&ff->polled_node)) { | |
2592 | struct rb_node **link, *parent; | |
2593 | ||
2594 | link = fuse_find_polled_node(fc, ff->kh, &parent); | |
2595 | BUG_ON(*link); | |
2596 | rb_link_node(&ff->polled_node, parent, link); | |
2597 | rb_insert_color(&ff->polled_node, &fc->polled_files); | |
2598 | } | |
2599 | spin_unlock(&fc->lock); | |
2600 | } | |
2601 | ||
08cbf542 | 2602 | unsigned fuse_file_poll(struct file *file, poll_table *wait) |
95668a69 | 2603 | { |
95668a69 | 2604 | struct fuse_file *ff = file->private_data; |
797759aa | 2605 | struct fuse_conn *fc = ff->fc; |
95668a69 TH |
2606 | struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; |
2607 | struct fuse_poll_out outarg; | |
2608 | struct fuse_req *req; | |
2609 | int err; | |
2610 | ||
2611 | if (fc->no_poll) | |
2612 | return DEFAULT_POLLMASK; | |
2613 | ||
2614 | poll_wait(file, &ff->poll_wait, wait); | |
0415d291 | 2615 | inarg.events = (__u32)poll_requested_events(wait); |
95668a69 TH |
2616 | |
2617 | /* | |
2618 | * Ask for notification iff there's someone waiting for it. | |
2619 | * The client may ignore the flag and always notify. | |
2620 | */ | |
2621 | if (waitqueue_active(&ff->poll_wait)) { | |
2622 | inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; | |
2623 | fuse_register_polled_file(fc, ff); | |
2624 | } | |
2625 | ||
b111c8c0 | 2626 | req = fuse_get_req_nopages(fc); |
95668a69 | 2627 | if (IS_ERR(req)) |
201fa69a | 2628 | return POLLERR; |
95668a69 TH |
2629 | |
2630 | req->in.h.opcode = FUSE_POLL; | |
797759aa | 2631 | req->in.h.nodeid = ff->nodeid; |
95668a69 TH |
2632 | req->in.numargs = 1; |
2633 | req->in.args[0].size = sizeof(inarg); | |
2634 | req->in.args[0].value = &inarg; | |
2635 | req->out.numargs = 1; | |
2636 | req->out.args[0].size = sizeof(outarg); | |
2637 | req->out.args[0].value = &outarg; | |
b93f858a | 2638 | fuse_request_send(fc, req); |
95668a69 TH |
2639 | err = req->out.h.error; |
2640 | fuse_put_request(fc, req); | |
2641 | ||
2642 | if (!err) | |
2643 | return outarg.revents; | |
2644 | if (err == -ENOSYS) { | |
2645 | fc->no_poll = 1; | |
2646 | return DEFAULT_POLLMASK; | |
2647 | } | |
2648 | return POLLERR; | |
2649 | } | |
08cbf542 | 2650 | EXPORT_SYMBOL_GPL(fuse_file_poll); |
95668a69 TH |
2651 | |
2652 | /* | |
2653 | * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and | |
2654 | * wakes up the poll waiters. | |
2655 | */ | |
2656 | int fuse_notify_poll_wakeup(struct fuse_conn *fc, | |
2657 | struct fuse_notify_poll_wakeup_out *outarg) | |
2658 | { | |
2659 | u64 kh = outarg->kh; | |
2660 | struct rb_node **link; | |
2661 | ||
2662 | spin_lock(&fc->lock); | |
2663 | ||
2664 | link = fuse_find_polled_node(fc, kh, NULL); | |
2665 | if (*link) { | |
2666 | struct fuse_file *ff; | |
2667 | ||
2668 | ff = rb_entry(*link, struct fuse_file, polled_node); | |
2669 | wake_up_interruptible_sync(&ff->poll_wait); | |
2670 | } | |
2671 | ||
2672 | spin_unlock(&fc->lock); | |
2673 | return 0; | |
2674 | } | |
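/*
 * Poll protocol summary: fuse_file_poll() sends FUSE_POLL and reports the
 * revents returned by the server; -ENOSYS disables polling for the whole
 * connection and DEFAULT_POLLMASK is returned from then on.  When
 * FUSE_POLL_SCHEDULE_NOTIFY was set in the request, the server is
 * expected to send a FUSE_NOTIFY_POLL notification with the same kh once
 * the file becomes ready, which reaches fuse_notify_poll_wakeup() above
 * and wakes the waiters on ff->poll_wait.
 */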
2675 | ||
efb9fa9e MP |
2676 | static void fuse_do_truncate(struct file *file) |
2677 | { | |
2678 | struct inode *inode = file->f_mapping->host; | |
2679 | struct iattr attr; | |
2680 | ||
2681 | attr.ia_valid = ATTR_SIZE; | |
2682 | attr.ia_size = i_size_read(inode); | |
2683 | ||
2684 | attr.ia_file = file; | |
2685 | attr.ia_valid |= ATTR_FILE; | |
2686 | ||
2687 | fuse_do_setattr(inode, &attr, file); | |
2688 | } | |
2689 | ||
e5c5f05d MP |
2690 | static inline loff_t fuse_round_up(loff_t off) |
2691 | { | |
2692 | return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); | |
2693 | } | |
2694 | ||
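/*
 * ->direct_IO() entry point.  A fuse_io_priv is set up so the transfer
 * can be submitted asynchronously when the server supports async direct
 * I/O; reads beyond i_size are trimmed (after rounding up to a request
 * boundary), and size-extending writes are forced synchronous because
 * their completion cannot be waited for after returning.  If a direct
 * write fails after it may have extended the file, the size is
 * truncated back via fuse_do_truncate().
 */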
4273b793 AA |
2695 | static ssize_t |
2696 | fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |
2697 | loff_t offset, unsigned long nr_segs) | |
2698 | { | |
2699 | ssize_t ret = 0; | |
60b9df7a MS |
2700 | struct file *file = iocb->ki_filp; |
2701 | struct fuse_file *ff = file->private_data; | |
e5c5f05d | 2702 | bool async_dio = ff->fc->async_dio; |
4273b793 | 2703 | loff_t pos = 0; |
bcba24cc MP |
2704 | struct inode *inode; |
2705 | loff_t i_size; | |
2706 | size_t count = iov_length(iov, nr_segs); | |
36cf66ed | 2707 | struct fuse_io_priv *io; |
4273b793 | 2708 | |
4273b793 | 2709 | pos = offset; |
bcba24cc MP |
2710 | inode = file->f_mapping->host; |
2711 | i_size = i_size_read(inode); | |
4273b793 | 2712 | |
439ee5f0 | 2713 | /* optimization for short read */ |
e5c5f05d | 2714 | if (async_dio && rw != WRITE && offset + count > i_size) { |
439ee5f0 MP |
2715 | if (offset >= i_size) |
2716 | return 0; | |
e5c5f05d | 2717 | count = min_t(loff_t, count, fuse_round_up(i_size - offset)); |
439ee5f0 MP |
2718 | } |
2719 | ||
bcba24cc | 2720 | io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); |
36cf66ed MP |
2721 | if (!io) |
2722 | return -ENOMEM; | |
bcba24cc MP |
2723 | spin_lock_init(&io->lock); |
2724 | io->reqs = 1; | |
2725 | io->bytes = -1; | |
2726 | io->size = 0; | |
2727 | io->offset = offset; | |
2728 | io->write = (rw == WRITE); | |
2729 | io->err = 0; | |
36cf66ed | 2730 | io->file = file; |
bcba24cc MP |
2731 | /* |
2732 | * By default, we want to optimize all I/Os with async request | |
60b9df7a | 2733 | * submission to the client filesystem if supported. |
bcba24cc | 2734 | */ |
e5c5f05d | 2735 | io->async = async_dio; |
bcba24cc MP |
2736 | io->iocb = iocb; |
2737 | ||
2738 | /* | |
2739 | * We cannot asynchronously extend the size of a file. We have no method | |
2740 | * to wait on real async I/O requests, so we must submit this request | |
2741 | * synchronously. | |
2742 | */ | |
e5c5f05d | 2743 | if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) |
bcba24cc | 2744 | io->async = false; |
4273b793 | 2745 | |
b98d023a | 2746 | if (rw == WRITE) |
36cf66ed | 2747 | ret = __fuse_direct_write(io, iov, nr_segs, &pos); |
b98d023a | 2748 | else |
439ee5f0 | 2749 | ret = __fuse_direct_read(io, iov, nr_segs, &pos, count); |
36cf66ed | 2750 | |
bcba24cc MP |
2751 | if (io->async) { |
2752 | fuse_aio_complete(io, ret < 0 ? ret : 0, -1); | |
2753 | ||
2754 | /* we have a non-extending, async request, so return */ | |
c9ecf989 | 2755 | if (!is_sync_kiocb(iocb)) |
bcba24cc MP |
2756 | return -EIOCBQUEUED; |
2757 | ||
2758 | ret = wait_on_sync_kiocb(iocb); | |
2759 | } else { | |
2760 | kfree(io); | |
2761 | } | |
2762 | ||
efb9fa9e MP |
2763 | if (rw == WRITE) { |
2764 | if (ret > 0) | |
2765 | fuse_write_update_size(inode, pos); | |
2766 | else if (ret < 0 && offset + count > i_size) | |
2767 | fuse_do_truncate(file); | |
2768 | } | |
4273b793 AA |
2769 | |
2770 | return ret; | |
2771 | } | |
2772 | ||
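/*
 * FUSE_FALLOCATE.  The inode mutex is taken whenever the operation can
 * change the file size or punches a hole; for hole punching the affected
 * range is written back and waited for first so the server sees a
 * consistent view.  FUSE_I_SIZE_UNSTABLE is set around the request when
 * the size may change, the cached size is updated on success, a punched
 * range is dropped from the page cache, and -ENOSYS from the server
 * disables fallocate for the connection.
 */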
cdadb11c MS |
2773 | static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
2774 | loff_t length) | |
05ba1f08 AP |
2775 | { |
2776 | struct fuse_file *ff = file->private_data; | |
3634a632 | 2777 | struct inode *inode = file->f_inode; |
0ab08f57 | 2778 | struct fuse_inode *fi = get_fuse_inode(inode); |
05ba1f08 AP |
2779 | struct fuse_conn *fc = ff->fc; |
2780 | struct fuse_req *req; | |
2781 | struct fuse_fallocate_in inarg = { | |
2782 | .fh = ff->fh, | |
2783 | .offset = offset, | |
2784 | .length = length, | |
2785 | .mode = mode | |
2786 | }; | |
2787 | int err; | |
14c14414 MP |
2788 | bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || |
2789 | (mode & FALLOC_FL_PUNCH_HOLE); | |
05ba1f08 | 2790 | |
519c6040 MS |
2791 | if (fc->no_fallocate) |
2792 | return -EOPNOTSUPP; | |
2793 | ||
14c14414 | 2794 | if (lock_inode) { |
3634a632 | 2795 | mutex_lock(&inode->i_mutex); |
bde52788 MP |
2796 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
2797 | loff_t endbyte = offset + length - 1; | |
2798 | err = filemap_write_and_wait_range(inode->i_mapping, | |
2799 | offset, endbyte); | |
2800 | if (err) | |
2801 | goto out; | |
2802 | ||
2803 | fuse_sync_writes(inode); | |
2804 | } | |
3634a632 BF |
2805 | } |
2806 | ||
0ab08f57 MP |
2807 | if (!(mode & FALLOC_FL_KEEP_SIZE)) |
2808 | set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); | |
2809 | ||
b111c8c0 | 2810 | req = fuse_get_req_nopages(fc); |
3634a632 BF |
2811 | if (IS_ERR(req)) { |
2812 | err = PTR_ERR(req); | |
2813 | goto out; | |
2814 | } | |
05ba1f08 AP |
2815 | |
2816 | req->in.h.opcode = FUSE_FALLOCATE; | |
2817 | req->in.h.nodeid = ff->nodeid; | |
2818 | req->in.numargs = 1; | |
2819 | req->in.args[0].size = sizeof(inarg); | |
2820 | req->in.args[0].value = &inarg; | |
2821 | fuse_request_send(fc, req); | |
2822 | err = req->out.h.error; | |
519c6040 MS |
2823 | if (err == -ENOSYS) { |
2824 | fc->no_fallocate = 1; | |
2825 | err = -EOPNOTSUPP; | |
2826 | } | |
05ba1f08 AP |
2827 | fuse_put_request(fc, req); |
2828 | ||
bee6c307 BF |
2829 | if (err) |
2830 | goto out; | |
2831 | ||
2832 | /* we could have extended the file */ | |
2833 | if (!(mode & FALLOC_FL_KEEP_SIZE)) | |
2834 | fuse_write_update_size(inode, offset + length); | |
2835 | ||
2836 | if (mode & FALLOC_FL_PUNCH_HOLE) | |
2837 | truncate_pagecache_range(inode, offset, offset + length - 1); | |
2838 | ||
2839 | fuse_invalidate_attr(inode); | |
2840 | ||
3634a632 | 2841 | out: |
0ab08f57 MP |
2842 | if (!(mode & FALLOC_FL_KEEP_SIZE)) |
2843 | clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); | |
2844 | ||
bde52788 | 2845 | if (lock_inode) |
3634a632 | 2846 | mutex_unlock(&inode->i_mutex); |
3634a632 | 2847 | |
05ba1f08 AP |
2848 | return err; |
2849 | } | |
05ba1f08 | 2850 | |
4b6f5d20 | 2851 | static const struct file_operations fuse_file_operations = { |
5559b8f4 | 2852 | .llseek = fuse_file_llseek, |
543ade1f | 2853 | .read = do_sync_read, |
bcb4be80 | 2854 | .aio_read = fuse_file_aio_read, |
543ade1f | 2855 | .write = do_sync_write, |
ea9b9907 | 2856 | .aio_write = fuse_file_aio_write, |
b6aeaded MS |
2857 | .mmap = fuse_file_mmap, |
2858 | .open = fuse_open, | |
2859 | .flush = fuse_flush, | |
2860 | .release = fuse_release, | |
2861 | .fsync = fuse_fsync, | |
71421259 | 2862 | .lock = fuse_file_lock, |
a9ff4f87 | 2863 | .flock = fuse_file_flock, |
5ffc4ef4 | 2864 | .splice_read = generic_file_splice_read, |
59efec7b TH |
2865 | .unlocked_ioctl = fuse_file_ioctl, |
2866 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 2867 | .poll = fuse_file_poll, |
05ba1f08 | 2868 | .fallocate = fuse_file_fallocate, |
b6aeaded MS |
2869 | }; |
2870 | ||
4b6f5d20 | 2871 | static const struct file_operations fuse_direct_io_file_operations = { |
5559b8f4 | 2872 | .llseek = fuse_file_llseek, |
413ef8cb MS |
2873 | .read = fuse_direct_read, |
2874 | .write = fuse_direct_write, | |
fc280c96 | 2875 | .mmap = fuse_direct_mmap, |
413ef8cb MS |
2876 | .open = fuse_open, |
2877 | .flush = fuse_flush, | |
2878 | .release = fuse_release, | |
2879 | .fsync = fuse_fsync, | |
71421259 | 2880 | .lock = fuse_file_lock, |
a9ff4f87 | 2881 | .flock = fuse_file_flock, |
59efec7b TH |
2882 | .unlocked_ioctl = fuse_file_ioctl, |
2883 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 2884 | .poll = fuse_file_poll, |
05ba1f08 | 2885 | .fallocate = fuse_file_fallocate, |
fc280c96 | 2886 | /* no splice_read */ |
413ef8cb MS |
2887 | }; |
2888 | ||
f5e54d6e | 2889 | static const struct address_space_operations fuse_file_aops = { |
b6aeaded | 2890 | .readpage = fuse_readpage, |
3be5a52b | 2891 | .writepage = fuse_writepage, |
26d614df | 2892 | .writepages = fuse_writepages, |
3be5a52b | 2893 | .launder_page = fuse_launder_page, |
db50b96c | 2894 | .readpages = fuse_readpages, |
3be5a52b | 2895 | .set_page_dirty = __set_page_dirty_nobuffers, |
b2d2272f | 2896 | .bmap = fuse_bmap, |
4273b793 | 2897 | .direct_IO = fuse_direct_IO, |
b6aeaded MS |
2898 | }; |
2899 | ||
2900 | void fuse_init_file_inode(struct inode *inode) | |
2901 | { | |
45323fb7 MS |
2902 | inode->i_fop = &fuse_file_operations; |
2903 | inode->i_data.a_ops = &fuse_file_aops; | |
b6aeaded | 2904 | } |