fs/fuse/dev.c (git blame, mirror_ubuntu-zesty-kernel.git, at commit "fuse: fix use after free issue in fuse_dev_do_read()")
334f485d
MS
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
334f485d
MS
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/uio.h>
15#include <linux/miscdevice.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/slab.h>
dd3bb14f 19#include <linux/pipe_fs_i.h>
ce534fb0
MS
20#include <linux/swap.h>
21#include <linux/splice.h>
334f485d
MS
22
23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
578454ff 24MODULE_ALIAS("devname:fuse");
334f485d 25
e18b890b 26static struct kmem_cache *fuse_req_cachep;
334f485d 27
cc080e9e 28static struct fuse_dev *fuse_get_dev(struct file *file)
334f485d 29{
0720b315
MS
30 /*
 31 * Lockless access is OK, because file->private_data is set
32 * once during mount and is valid until the file is released.
33 */
cc080e9e 34 return ACCESS_ONCE(file->private_data);
334f485d
MS
35}
36
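/*
 * Reset a request to its initial state and attach the caller supplied
 * page and page-descriptor arrays.  The request starts with a single
 * reference and in FR_PENDING state.
 */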
4250c066 37static void fuse_request_init(struct fuse_req *req, struct page **pages,
b2430d75 38 struct fuse_page_desc *page_descs,
4250c066 39 unsigned npages)
334f485d
MS
40{
41 memset(req, 0, sizeof(*req));
4250c066 42 memset(pages, 0, sizeof(*pages) * npages);
b2430d75 43 memset(page_descs, 0, sizeof(*page_descs) * npages);
334f485d 44 INIT_LIST_HEAD(&req->list);
a4d27e75 45 INIT_LIST_HEAD(&req->intr_entry);
334f485d
MS
46 init_waitqueue_head(&req->waitq);
47 atomic_set(&req->count, 1);
4250c066 48 req->pages = pages;
b2430d75 49 req->page_descs = page_descs;
4250c066 50 req->max_pages = npages;
33e14b4d 51 __set_bit(FR_PENDING, &req->flags);
334f485d
MS
52}
53
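/*
 * Allocate a request from the slab cache.  Requests needing at most
 * FUSE_REQ_INLINE_PAGES pages use the arrays embedded in the request
 * itself; larger ones get separately kmalloc'ed arrays.
 */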
4250c066 54static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
334f485d 55{
4250c066
MP
56 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
57 if (req) {
58 struct page **pages;
b2430d75 59 struct fuse_page_desc *page_descs;
4250c066 60
b2430d75 61 if (npages <= FUSE_REQ_INLINE_PAGES) {
4250c066 62 pages = req->inline_pages;
b2430d75
MP
63 page_descs = req->inline_page_descs;
64 } else {
4250c066 65 pages = kmalloc(sizeof(struct page *) * npages, flags);
b2430d75
MP
66 page_descs = kmalloc(sizeof(struct fuse_page_desc) *
67 npages, flags);
68 }
4250c066 69
b2430d75
MP
70 if (!pages || !page_descs) {
71 kfree(pages);
72 kfree(page_descs);
4250c066
MP
73 kmem_cache_free(fuse_req_cachep, req);
74 return NULL;
75 }
76
b2430d75 77 fuse_request_init(req, pages, page_descs, npages);
4250c066 78 }
334f485d
MS
79 return req;
80}
4250c066
MP
81
82struct fuse_req *fuse_request_alloc(unsigned npages)
83{
84 return __fuse_request_alloc(npages, GFP_KERNEL);
85}
08cbf542 86EXPORT_SYMBOL_GPL(fuse_request_alloc);
334f485d 87
4250c066 88struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
3be5a52b 89{
4250c066 90 return __fuse_request_alloc(npages, GFP_NOFS);
3be5a52b
MS
91}
92
334f485d
MS
93void fuse_request_free(struct fuse_req *req)
94{
b2430d75 95 if (req->pages != req->inline_pages) {
4250c066 96 kfree(req->pages);
b2430d75
MP
97 kfree(req->page_descs);
98 }
334f485d
MS
99 kmem_cache_free(fuse_req_cachep, req);
100}
101
36cf66ed 102void __fuse_get_request(struct fuse_req *req)
334f485d
MS
103{
104 atomic_inc(&req->count);
105}
106
107/* Must be called with > 1 refcount */
108static void __fuse_put_request(struct fuse_req *req)
109{
110 BUG_ON(atomic_read(&req->count) < 2);
111 atomic_dec(&req->count);
112}
113
33649c91
MS
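/* Record the credentials (uid, gid, pid) of the task issuing the request. */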
114static void fuse_req_init_context(struct fuse_req *req)
115{
499dcf20
EB
116 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
117 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
33649c91
MS
118 req->in.h.pid = current->pid;
119}
120
9759bd51
MS
121void fuse_set_initialized(struct fuse_conn *fc)
122{
123 /* Make sure stores before this are seen on another CPU */
124 smp_wmb();
125 fc->initialized = 1;
126}
127
0aada884
MP
128static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
129{
130 return !fc->initialized || (for_background && fc->blocked);
131}
132
8b41e671
MP
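/*
 * Common request allocation path: wait (killably) until the connection
 * has been initialized and, for background requests, is not blocked,
 * then allocate a request and account it in fc->num_waiting.
 */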
133static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
134 bool for_background)
334f485d 135{
08a53cdc 136 struct fuse_req *req;
08a53cdc 137 int err;
9bc5ddda 138 atomic_inc(&fc->num_waiting);
0aada884
MP
139
140 if (fuse_block_alloc(fc, for_background)) {
0aada884 141 err = -EINTR;
7d3a07fc
AV
142 if (wait_event_killable_exclusive(fc->blocked_waitq,
143 !fuse_block_alloc(fc, for_background)))
0aada884
MP
144 goto out;
145 }
9759bd51
MS
146 /* Matches smp_wmb() in fuse_set_initialized() */
147 smp_rmb();
08a53cdc 148
51eb01e7
MS
149 err = -ENOTCONN;
150 if (!fc->connected)
151 goto out;
152
de155226
MS
153 err = -ECONNREFUSED;
154 if (fc->conn_error)
155 goto out;
156
b111c8c0 157 req = fuse_request_alloc(npages);
9bc5ddda 158 err = -ENOMEM;
722d2bea
MP
159 if (!req) {
160 if (for_background)
161 wake_up(&fc->blocked_waitq);
9bc5ddda 162 goto out;
722d2bea 163 }
334f485d 164
33649c91 165 fuse_req_init_context(req);
825d6d33
MS
166 __set_bit(FR_WAITING, &req->flags);
167 if (for_background)
168 __set_bit(FR_BACKGROUND, &req->flags);
169
334f485d 170 return req;
9bc5ddda
MS
171
172 out:
173 atomic_dec(&fc->num_waiting);
174 return ERR_PTR(err);
334f485d 175}
8b41e671
MP
176
177struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
178{
179 return __fuse_get_req(fc, npages, false);
180}
08cbf542 181EXPORT_SYMBOL_GPL(fuse_get_req);
334f485d 182
8b41e671
MP
183struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
184 unsigned npages)
185{
186 return __fuse_get_req(fc, npages, true);
187}
188EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
189
33649c91
MS
190/*
 191 * Return the request in fuse_file->reserved_req. However, that may
192 * currently be in use. If that is the case, wait for it to become
193 * available.
194 */
195static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
196 struct file *file)
197{
198 struct fuse_req *req = NULL;
199 struct fuse_file *ff = file->private_data;
200
201 do {
de5e3dec 202 wait_event(fc->reserved_req_waitq, ff->reserved_req);
33649c91
MS
203 spin_lock(&fc->lock);
204 if (ff->reserved_req) {
205 req = ff->reserved_req;
206 ff->reserved_req = NULL;
cb0942b8 207 req->stolen_file = get_file(file);
33649c91
MS
208 }
209 spin_unlock(&fc->lock);
210 } while (!req);
211
212 return req;
213}
214
215/*
216 * Put stolen request back into fuse_file->reserved_req
217 */
218static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
219{
220 struct file *file = req->stolen_file;
221 struct fuse_file *ff = file->private_data;
222
223 spin_lock(&fc->lock);
b2430d75 224 fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
33649c91
MS
225 BUG_ON(ff->reserved_req);
226 ff->reserved_req = req;
de5e3dec 227 wake_up_all(&fc->reserved_req_waitq);
33649c91
MS
228 spin_unlock(&fc->lock);
229 fput(file);
230}
231
232/*
 233 * Gets a request for a file operation, always succeeds
234 *
235 * This is used for sending the FLUSH request, which must get to
236 * userspace, due to POSIX locks which may need to be unlocked.
237 *
238 * If allocation fails due to OOM, use the reserved request in
239 * fuse_file.
240 *
241 * This is very unlikely to deadlock accidentally, since the
 242 * filesystem should not have its own file open. If deadlock is
243 * intentional, it can still be broken by "aborting" the filesystem.
244 */
b111c8c0
MP
245struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
246 struct file *file)
33649c91
MS
247{
248 struct fuse_req *req;
249
250 atomic_inc(&fc->num_waiting);
0aada884 251 wait_event(fc->blocked_waitq, fc->initialized);
9759bd51
MS
252 /* Matches smp_wmb() in fuse_set_initialized() */
253 smp_rmb();
b111c8c0 254 req = fuse_request_alloc(0);
33649c91
MS
255 if (!req)
256 req = get_reserved_req(fc, file);
257
258 fuse_req_init_context(req);
825d6d33
MS
259 __set_bit(FR_WAITING, &req->flags);
260 __clear_bit(FR_BACKGROUND, &req->flags);
33649c91
MS
261 return req;
262}
263
334f485d 264void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
7128ec2a
MS
265{
266 if (atomic_dec_and_test(&req->count)) {
825d6d33 267 if (test_bit(FR_BACKGROUND, &req->flags)) {
722d2bea
MP
268 /*
269 * We get here in the unlikely case that a background
270 * request was allocated but not sent
271 */
272 spin_lock(&fc->lock);
273 if (!fc->blocked)
274 wake_up(&fc->blocked_waitq);
275 spin_unlock(&fc->lock);
276 }
277
825d6d33
MS
278 if (test_bit(FR_WAITING, &req->flags)) {
279 __clear_bit(FR_WAITING, &req->flags);
9bc5ddda 280 atomic_dec(&fc->num_waiting);
73e0e738 281 }
33649c91
MS
282
283 if (req->stolen_file)
284 put_reserved_req(fc, req);
285 else
286 fuse_request_free(req);
7128ec2a
MS
287 }
288}
08cbf542 289EXPORT_SYMBOL_GPL(fuse_put_request);
7128ec2a 290
d12def1b
MS
291static unsigned len_args(unsigned numargs, struct fuse_arg *args)
292{
293 unsigned nbytes = 0;
294 unsigned i;
295
296 for (i = 0; i < numargs; i++)
297 nbytes += args[i].size;
298
299 return nbytes;
300}
301
f88996a9 302static u64 fuse_get_unique(struct fuse_iqueue *fiq)
d12def1b 303{
f88996a9 304 return ++fiq->reqctr;
d12def1b
MS
305}
306
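/*
 * Add a request to the end of the pending list and wake up a reader.
 * The caller must hold fiq->waitq.lock.
 */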
f88996a9 307static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
d12def1b 308{
d12def1b
MS
309 req->in.h.len = sizeof(struct fuse_in_header) +
310 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
f88996a9 311 list_add_tail(&req->list, &fiq->pending);
4ce60812 312 wake_up_locked(&fiq->waitq);
f88996a9 313 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
d12def1b
MS
314}
315
07e77dca
MS
316void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
317 u64 nodeid, u64 nlookup)
318{
f88996a9
MS
319 struct fuse_iqueue *fiq = &fc->iq;
320
02c048b9
MS
321 forget->forget_one.nodeid = nodeid;
322 forget->forget_one.nlookup = nlookup;
07e77dca 323
4ce60812 324 spin_lock(&fiq->waitq.lock);
e16714d8 325 if (fiq->connected) {
f88996a9
MS
326 fiq->forget_list_tail->next = forget;
327 fiq->forget_list_tail = forget;
4ce60812 328 wake_up_locked(&fiq->waitq);
f88996a9 329 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
5dfcc87f
MS
330 } else {
331 kfree(forget);
332 }
4ce60812 333 spin_unlock(&fiq->waitq.lock);
07e77dca
MS
334}
335
d12def1b
MS
336static void flush_bg_queue(struct fuse_conn *fc)
337{
7a6d3c8b 338 while (fc->active_background < fc->max_background &&
d12def1b
MS
339 !list_empty(&fc->bg_queue)) {
340 struct fuse_req *req;
f88996a9 341 struct fuse_iqueue *fiq = &fc->iq;
d12def1b
MS
342
343 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
344 list_del(&req->list);
345 fc->active_background++;
4ce60812 346 spin_lock(&fiq->waitq.lock);
f88996a9
MS
347 req->in.h.unique = fuse_get_unique(fiq);
348 queue_request(fiq, req);
4ce60812 349 spin_unlock(&fiq->waitq.lock);
d12def1b
MS
350 }
351}
352
334f485d
MS
353/*
354 * This function is called when a request is finished. Either a reply
f9a2842e 355 * has arrived or it was aborted (and not yet sent) or some error
f43b155a 356 * occurred during communication with userspace, or the device file
51eb01e7
MS
357 * was closed. The requester thread is woken up (if still waiting),
358 * the 'end' callback is called if given, else the reference to the
359 * request is released
334f485d
MS
360 */
361static void request_end(struct fuse_conn *fc, struct fuse_req *req)
362{
4ce60812 363 struct fuse_iqueue *fiq = &fc->iq;
365ae710 364
efe2800f 365 if (test_and_set_bit(FR_FINISHED, &req->flags))
365ae710 366 return;
365ae710 367
4ce60812 368 spin_lock(&fiq->waitq.lock);
0d8e84b0 369 list_del_init(&req->intr_entry);
4ce60812 370 spin_unlock(&fiq->waitq.lock);
33e14b4d
MS
371 WARN_ON(test_bit(FR_PENDING, &req->flags));
372 WARN_ON(test_bit(FR_SENT, &req->flags));
825d6d33 373 if (test_bit(FR_BACKGROUND, &req->flags)) {
efe2800f 374 spin_lock(&fc->lock);
825d6d33 375 clear_bit(FR_BACKGROUND, &req->flags);
722d2bea 376 if (fc->num_background == fc->max_background)
51eb01e7 377 fc->blocked = 0;
722d2bea
MP
378
379 /* Wake up next waiter, if any */
3c18ef81 380 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
722d2bea
MP
381 wake_up(&fc->blocked_waitq);
382
7a6d3c8b 383 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 384 fc->connected && fc->bdi_initialized) {
8aa7e847
JA
385 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
386 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
f92b99b9 387 }
51eb01e7 388 fc->num_background--;
d12def1b
MS
389 fc->active_background--;
390 flush_bg_queue(fc);
efe2800f 391 spin_unlock(&fc->lock);
334f485d 392 }
51eb01e7 393 wake_up(&req->waitq);
1e6881c3
MS
394 if (req->end)
395 req->end(fc, req);
e9bb09dd 396 fuse_put_request(fc, req);
334f485d
MS
397}
398
f88996a9 399static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
a4d27e75 400{
4ce60812 401 spin_lock(&fiq->waitq.lock);
6ba4d272
ST
402 if (test_bit(FR_FINISHED, &req->flags)) {
403 spin_unlock(&fiq->waitq.lock);
404 return;
405 }
8f7bb368
MS
406 if (list_empty(&req->intr_entry)) {
407 list_add_tail(&req->intr_entry, &fiq->interrupts);
408 wake_up_locked(&fiq->waitq);
409 }
4ce60812 410 spin_unlock(&fiq->waitq.lock);
f88996a9 411 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
a4d27e75
MS
412}
413
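/*
 * Wait until the request is finished.  The wait is interruptible while
 * an INTERRUPT can still be sent, killable while the request can still
 * be removed from the pending list, and uninterruptible once it has
 * reached userspace (or was forced).
 */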
7c352bdf 414static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
334f485d 415{
4ce60812 416 struct fuse_iqueue *fiq = &fc->iq;
c4775267
MS
417 int err;
418
a4d27e75
MS
419 if (!fc->no_interrupt) {
420 /* Any signal may interrupt this */
c4775267 421 err = wait_event_interruptible(req->waitq,
33e14b4d 422 test_bit(FR_FINISHED, &req->flags));
c4775267 423 if (!err)
a4d27e75
MS
424 return;
425
825d6d33 426 set_bit(FR_INTERRUPTED, &req->flags);
8f7bb368
MS
427 /* matches barrier in fuse_dev_do_read() */
428 smp_mb__after_atomic();
33e14b4d 429 if (test_bit(FR_SENT, &req->flags))
4ce60812 430 queue_interrupt(fiq, req);
a4d27e75
MS
431 }
432
825d6d33 433 if (!test_bit(FR_FORCE, &req->flags)) {
a4d27e75 434 /* Only fatal signals may interrupt this */
7d3a07fc 435 err = wait_event_killable(req->waitq,
33e14b4d 436 test_bit(FR_FINISHED, &req->flags));
c4775267 437 if (!err)
a131de0a
MS
438 return;
439
4ce60812 440 spin_lock(&fiq->waitq.lock);
a131de0a 441 /* Request is not yet in userspace, bail out */
33e14b4d 442 if (test_bit(FR_PENDING, &req->flags)) {
a131de0a 443 list_del(&req->list);
4ce60812 444 spin_unlock(&fiq->waitq.lock);
a131de0a
MS
445 __fuse_put_request(req);
446 req->out.h.error = -EINTR;
447 return;
448 }
4ce60812 449 spin_unlock(&fiq->waitq.lock);
51eb01e7 450 }
334f485d 451
a131de0a
MS
452 /*
453 * Either request is already in userspace, or it was forced.
454 * Wait it out.
455 */
33e14b4d 456 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
334f485d
MS
457}
458
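/*
 * Queue a request on the input queue and wait for the answer (or fail
 * with -ENOTCONN if the connection is gone).  An extra reference is
 * taken so the request remains valid after request_end().
 */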
6a4e922c 459static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
334f485d 460{
e16714d8
MS
461 struct fuse_iqueue *fiq = &fc->iq;
462
825d6d33 463 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
4ce60812 464 spin_lock(&fiq->waitq.lock);
e16714d8 465 if (!fiq->connected) {
4ce60812 466 spin_unlock(&fiq->waitq.lock);
334f485d 467 req->out.h.error = -ENOTCONN;
c4775267 468 } else {
f88996a9
MS
469 req->in.h.unique = fuse_get_unique(fiq);
470 queue_request(fiq, req);
334f485d
MS
471 /* acquire extra reference, since request is still needed
472 after request_end() */
473 __fuse_get_request(req);
4ce60812 474 spin_unlock(&fiq->waitq.lock);
334f485d 475
7c352bdf 476 request_wait_answer(fc, req);
c4775267
MS
477 /* Pairs with smp_wmb() in request_end() */
478 smp_rmb();
334f485d 479 }
334f485d 480}
6a4e922c
EW
481
482void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
483{
825d6d33
MS
484 __set_bit(FR_ISREPLY, &req->flags);
485 if (!test_bit(FR_WAITING, &req->flags)) {
486 __set_bit(FR_WAITING, &req->flags);
5437f241
MS
487 atomic_inc(&fc->num_waiting);
488 }
6a4e922c
EW
489 __fuse_request_send(fc, req);
490}
08cbf542 491EXPORT_SYMBOL_GPL(fuse_request_send);
334f485d 492
21f62174
MS
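/*
 * Adjust argument sizes for servers speaking an older protocol minor
 * version, so the shorter legacy structures are used on both the
 * request and reply side.
 */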
493static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
494{
495 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
496 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
497
498 if (fc->minor < 9) {
499 switch (args->in.h.opcode) {
500 case FUSE_LOOKUP:
501 case FUSE_CREATE:
502 case FUSE_MKNOD:
503 case FUSE_MKDIR:
504 case FUSE_SYMLINK:
505 case FUSE_LINK:
506 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
507 break;
508 case FUSE_GETATTR:
509 case FUSE_SETATTR:
510 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
511 break;
512 }
513 }
514 if (fc->minor < 12) {
515 switch (args->in.h.opcode) {
516 case FUSE_CREATE:
517 args->in.args[0].size = sizeof(struct fuse_open_in);
518 break;
519 case FUSE_MKNOD:
520 args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
521 break;
522 }
523 }
524}
525
7078187a
MS
526ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
527{
528 struct fuse_req *req;
529 ssize_t ret;
530
531 req = fuse_get_req(fc, 0);
532 if (IS_ERR(req))
533 return PTR_ERR(req);
534
21f62174
MS
535 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
536 fuse_adjust_compat(fc, args);
537
7078187a
MS
538 req->in.h.opcode = args->in.h.opcode;
539 req->in.h.nodeid = args->in.h.nodeid;
540 req->in.numargs = args->in.numargs;
541 memcpy(req->in.args, args->in.args,
542 args->in.numargs * sizeof(struct fuse_in_arg));
543 req->out.argvar = args->out.argvar;
544 req->out.numargs = args->out.numargs;
545 memcpy(req->out.args, args->out.args,
546 args->out.numargs * sizeof(struct fuse_arg));
547 fuse_request_send(fc, req);
548 ret = req->out.h.error;
549 if (!ret && args->out.argvar) {
550 BUG_ON(args->out.numargs != 1);
551 ret = req->out.args[0].size;
552 }
553 fuse_put_request(fc, req);
554
555 return ret;
556}
557
f0139aa8
MS
558/*
559 * Called under fc->lock
560 *
561 * fc->connected must have been checked previously
562 */
563void fuse_request_send_background_locked(struct fuse_conn *fc,
564 struct fuse_req *req)
d12def1b 565{
825d6d33
MS
566 BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
567 if (!test_bit(FR_WAITING, &req->flags)) {
568 __set_bit(FR_WAITING, &req->flags);
5437f241
MS
569 atomic_inc(&fc->num_waiting);
570 }
825d6d33 571 __set_bit(FR_ISREPLY, &req->flags);
d12def1b 572 fc->num_background++;
7a6d3c8b 573 if (fc->num_background == fc->max_background)
d12def1b 574 fc->blocked = 1;
7a6d3c8b 575 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 576 fc->bdi_initialized) {
8aa7e847
JA
577 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
578 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
d12def1b
MS
579 }
580 list_add_tail(&req->list, &fc->bg_queue);
581 flush_bg_queue(fc);
582}
583
f0139aa8 584void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
334f485d 585{
42dc6211 586 BUG_ON(!req->end);
d7133114 587 spin_lock(&fc->lock);
1e9a4ed9 588 if (fc->connected) {
f0139aa8 589 fuse_request_send_background_locked(fc, req);
d7133114 590 spin_unlock(&fc->lock);
334f485d 591 } else {
42dc6211 592 spin_unlock(&fc->lock);
334f485d 593 req->out.h.error = -ENOTCONN;
42dc6211
MS
594 req->end(fc, req);
595 fuse_put_request(fc, req);
334f485d
MS
596 }
597}
08cbf542 598EXPORT_SYMBOL_GPL(fuse_request_send_background);
334f485d 599
2d45ba38
MS
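/*
 * Send a request in reply to a notification: the server-supplied unique
 * id is reused and no answer is expected.
 */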
600static int fuse_request_send_notify_reply(struct fuse_conn *fc,
601 struct fuse_req *req, u64 unique)
602{
603 int err = -ENODEV;
f88996a9 604 struct fuse_iqueue *fiq = &fc->iq;
2d45ba38 605
825d6d33 606 __clear_bit(FR_ISREPLY, &req->flags);
2d45ba38 607 req->in.h.unique = unique;
4ce60812 608 spin_lock(&fiq->waitq.lock);
e16714d8 609 if (fiq->connected) {
f88996a9 610 queue_request(fiq, req);
2d45ba38
MS
611 err = 0;
612 }
4ce60812 613 spin_unlock(&fiq->waitq.lock);
2d45ba38
MS
614
615 return err;
616}
617
0b05b183
AA
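/*
 * Synthesize a FORGET request for the given nodeid and send it without
 * waiting for a reply; errors are ignored.
 */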
618void fuse_force_forget(struct file *file, u64 nodeid)
619{
6131ffaa 620 struct inode *inode = file_inode(file);
0b05b183
AA
621 struct fuse_conn *fc = get_fuse_conn(inode);
622 struct fuse_req *req;
623 struct fuse_forget_in inarg;
624
625 memset(&inarg, 0, sizeof(inarg));
626 inarg.nlookup = 1;
b111c8c0 627 req = fuse_get_req_nofail_nopages(fc, file);
0b05b183
AA
628 req->in.h.opcode = FUSE_FORGET;
629 req->in.h.nodeid = nodeid;
630 req->in.numargs = 1;
631 req->in.args[0].size = sizeof(inarg);
632 req->in.args[0].value = &inarg;
825d6d33 633 __clear_bit(FR_ISREPLY, &req->flags);
6a4e922c
EW
634 __fuse_request_send(fc, req);
635 /* ignore errors */
636 fuse_put_request(fc, req);
0b05b183
AA
637}
638
334f485d
MS
639/*
640 * Lock the request. Up to the next unlock_request() there mustn't be
641 * anything that could cause a page-fault. If the request was already
f9a2842e 642 * aborted, bail out.
334f485d 643 */
dc00809a 644static int lock_request(struct fuse_req *req)
334f485d
MS
645{
646 int err = 0;
647 if (req) {
dc00809a 648 spin_lock(&req->waitq.lock);
825d6d33 649 if (test_bit(FR_ABORTED, &req->flags))
334f485d
MS
650 err = -ENOENT;
651 else
825d6d33 652 set_bit(FR_LOCKED, &req->flags);
dc00809a 653 spin_unlock(&req->waitq.lock);
334f485d
MS
654 }
655 return err;
656}
657
658/*
0d8e84b0
MS
659 * Unlock request. If it was aborted while locked, caller is responsible
660 * for unlocking and ending the request.
334f485d 661 */
dc00809a 662static int unlock_request(struct fuse_req *req)
334f485d 663{
0d8e84b0 664 int err = 0;
334f485d 665 if (req) {
dc00809a 666 spin_lock(&req->waitq.lock);
825d6d33 667 if (test_bit(FR_ABORTED, &req->flags))
0d8e84b0
MS
668 err = -ENOENT;
669 else
825d6d33 670 clear_bit(FR_LOCKED, &req->flags);
dc00809a 671 spin_unlock(&req->waitq.lock);
334f485d 672 }
0d8e84b0 673 return err;
334f485d
MS
674}
675
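/*
 * State of an ongoing copy between request arguments and a userspace
 * buffer, which is either an iov_iter or a set of pipe buffers.
 */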
676struct fuse_copy_state {
677 int write;
678 struct fuse_req *req;
6c09e94a 679 struct iov_iter *iter;
dd3bb14f
MS
680 struct pipe_buffer *pipebufs;
681 struct pipe_buffer *currbuf;
682 struct pipe_inode_info *pipe;
334f485d 683 unsigned long nr_segs;
334f485d 684 struct page *pg;
334f485d 685 unsigned len;
c55a01d3 686 unsigned offset;
ce534fb0 687 unsigned move_pages:1;
334f485d
MS
688};
689
dc00809a 690static void fuse_copy_init(struct fuse_copy_state *cs, int write,
6c09e94a 691 struct iov_iter *iter)
334f485d
MS
692{
693 memset(cs, 0, sizeof(*cs));
694 cs->write = write;
6c09e94a 695 cs->iter = iter;
334f485d
MS
696}
697
698/* Unmap and put previous page of userspace buffer */
8bfc016d 699static void fuse_copy_finish(struct fuse_copy_state *cs)
334f485d 700{
dd3bb14f
MS
701 if (cs->currbuf) {
702 struct pipe_buffer *buf = cs->currbuf;
703
c55a01d3 704 if (cs->write)
c3021629 705 buf->len = PAGE_SIZE - cs->len;
dd3bb14f 706 cs->currbuf = NULL;
c55a01d3 707 } else if (cs->pg) {
334f485d
MS
708 if (cs->write) {
709 flush_dcache_page(cs->pg);
710 set_page_dirty_lock(cs->pg);
711 }
712 put_page(cs->pg);
334f485d 713 }
c55a01d3 714 cs->pg = NULL;
334f485d
MS
715}
716
717/*
718 * Get another pagefull of userspace buffer, and map it to kernel
719 * address space, and lock request
720 */
721static int fuse_copy_fill(struct fuse_copy_state *cs)
722{
c55a01d3 723 struct page *page;
334f485d
MS
724 int err;
725
dc00809a 726 err = unlock_request(cs->req);
0d8e84b0
MS
727 if (err)
728 return err;
729
334f485d 730 fuse_copy_finish(cs);
dd3bb14f
MS
731 if (cs->pipebufs) {
732 struct pipe_buffer *buf = cs->pipebufs;
733
c3021629 734 if (!cs->write) {
fba597db 735 err = pipe_buf_confirm(cs->pipe, buf);
c3021629
MS
736 if (err)
737 return err;
738
739 BUG_ON(!cs->nr_segs);
740 cs->currbuf = buf;
c55a01d3
MS
741 cs->pg = buf->page;
742 cs->offset = buf->offset;
c3021629 743 cs->len = buf->len;
c3021629
MS
744 cs->pipebufs++;
745 cs->nr_segs--;
746 } else {
c3021629
MS
747 if (cs->nr_segs == cs->pipe->buffers)
748 return -EIO;
749
750 page = alloc_page(GFP_HIGHUSER);
751 if (!page)
752 return -ENOMEM;
753
754 buf->page = page;
755 buf->offset = 0;
756 buf->len = 0;
757
758 cs->currbuf = buf;
c55a01d3
MS
759 cs->pg = page;
760 cs->offset = 0;
c3021629
MS
761 cs->len = PAGE_SIZE;
762 cs->pipebufs++;
763 cs->nr_segs++;
764 }
dd3bb14f 765 } else {
6c09e94a
AV
766 size_t off;
767 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
dd3bb14f
MS
768 if (err < 0)
769 return err;
6c09e94a
AV
770 BUG_ON(!err);
771 cs->len = err;
772 cs->offset = off;
c55a01d3 773 cs->pg = page;
6c09e94a 774 iov_iter_advance(cs->iter, err);
334f485d 775 }
334f485d 776
dc00809a 777 return lock_request(cs->req);
334f485d
MS
778}
779
780/* Do as much copy to/from userspace buffer as we can */
8bfc016d 781static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
334f485d
MS
782{
783 unsigned ncpy = min(*size, cs->len);
784 if (val) {
c55a01d3
MS
785 void *pgaddr = kmap_atomic(cs->pg);
786 void *buf = pgaddr + cs->offset;
787
334f485d 788 if (cs->write)
c55a01d3 789 memcpy(buf, *val, ncpy);
334f485d 790 else
c55a01d3
MS
791 memcpy(*val, buf, ncpy);
792
793 kunmap_atomic(pgaddr);
334f485d
MS
794 *val += ncpy;
795 }
796 *size -= ncpy;
797 cs->len -= ncpy;
c55a01d3 798 cs->offset += ncpy;
334f485d
MS
799 return ncpy;
800}
801
ce534fb0
MS
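/*
 * Check that a page about to be stolen and inserted into the page cache
 * is not mapped, has no extra references and carries no unexpected flags.
 */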
802static int fuse_check_page(struct page *page)
803{
804 if (page_mapcount(page) ||
805 page->mapping != NULL ||
806 page_count(page) != 1 ||
807 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
808 ~(1 << PG_locked |
809 1 << PG_referenced |
810 1 << PG_uptodate |
811 1 << PG_lru |
812 1 << PG_active |
813 1 << PG_reclaim))) {
814 printk(KERN_WARNING "fuse: trying to steal weird page\n");
815 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
816 return 1;
817 }
818 return 0;
819}
820
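/*
 * Try to steal the page backing the current pipe buffer and splice it
 * into the page cache in place of *pagep.  Returns 1 to make the caller
 * fall back to an ordinary copy.
 */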
821static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
822{
823 int err;
824 struct page *oldpage = *pagep;
825 struct page *newpage;
826 struct pipe_buffer *buf = cs->pipebufs;
ce534fb0 827
dc00809a 828 err = unlock_request(cs->req);
0d8e84b0
MS
829 if (err)
830 return err;
831
ce534fb0
MS
832 fuse_copy_finish(cs);
833
fba597db 834 err = pipe_buf_confirm(cs->pipe, buf);
ce534fb0
MS
835 if (err)
836 return err;
837
838 BUG_ON(!cs->nr_segs);
839 cs->currbuf = buf;
840 cs->len = buf->len;
841 cs->pipebufs++;
842 cs->nr_segs--;
843
844 if (cs->len != PAGE_SIZE)
845 goto out_fallback;
846
ca76f5b6 847 if (pipe_buf_steal(cs->pipe, buf) != 0)
ce534fb0
MS
848 goto out_fallback;
849
850 newpage = buf->page;
851
aa991b3b
MS
852 if (!PageUptodate(newpage))
853 SetPageUptodate(newpage);
ce534fb0
MS
854
855 ClearPageMappedToDisk(newpage);
856
857 if (fuse_check_page(newpage) != 0)
858 goto out_fallback_unlock;
859
ce534fb0
MS
860 /*
 861 * This is a new and locked page; it shouldn't be mapped or
 862 * have any special flags on it
863 */
864 if (WARN_ON(page_mapped(oldpage)))
865 goto out_fallback_unlock;
866 if (WARN_ON(page_has_private(oldpage)))
867 goto out_fallback_unlock;
868 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
869 goto out_fallback_unlock;
870 if (WARN_ON(PageMlocked(oldpage)))
871 goto out_fallback_unlock;
872
ef6a3c63 873 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
ce534fb0 874 if (err) {
ef6a3c63
MS
875 unlock_page(newpage);
876 return err;
ce534fb0 877 }
ef6a3c63 878
09cbfeaf 879 get_page(newpage);
ce534fb0
MS
880
881 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
882 lru_cache_add_file(newpage);
883
884 err = 0;
dc00809a 885 spin_lock(&cs->req->waitq.lock);
825d6d33 886 if (test_bit(FR_ABORTED, &cs->req->flags))
ce534fb0
MS
887 err = -ENOENT;
888 else
889 *pagep = newpage;
dc00809a 890 spin_unlock(&cs->req->waitq.lock);
ce534fb0
MS
891
892 if (err) {
893 unlock_page(newpage);
09cbfeaf 894 put_page(newpage);
ce534fb0
MS
895 return err;
896 }
897
898 unlock_page(oldpage);
09cbfeaf 899 put_page(oldpage);
ce534fb0
MS
900 cs->len = 0;
901
902 return 0;
903
904out_fallback_unlock:
905 unlock_page(newpage);
906out_fallback:
c55a01d3
MS
907 cs->pg = buf->page;
908 cs->offset = buf->offset;
ce534fb0 909
dc00809a 910 err = lock_request(cs->req);
ce534fb0
MS
911 if (err)
912 return err;
913
914 return 1;
915}
916
c3021629
MS
917static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
918 unsigned offset, unsigned count)
919{
920 struct pipe_buffer *buf;
0d8e84b0 921 int err;
c3021629
MS
922
923 if (cs->nr_segs == cs->pipe->buffers)
924 return -EIO;
925
dc00809a 926 err = unlock_request(cs->req);
0d8e84b0
MS
927 if (err)
928 return err;
929
c3021629
MS
930 fuse_copy_finish(cs);
931
932 buf = cs->pipebufs;
09cbfeaf 933 get_page(page);
c3021629
MS
934 buf->page = page;
935 buf->offset = offset;
936 buf->len = count;
937
938 cs->pipebufs++;
939 cs->nr_segs++;
940 cs->len = 0;
941
942 return 0;
943}
944
334f485d
MS
945/*
946 * Copy a page in the request to/from the userspace buffer. Must be
947 * done atomically
948 */
ce534fb0 949static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
8bfc016d 950 unsigned offset, unsigned count, int zeroing)
334f485d 951{
ce534fb0
MS
952 int err;
953 struct page *page = *pagep;
954
b6777c40
MS
955 if (page && zeroing && count < PAGE_SIZE)
956 clear_highpage(page);
957
334f485d 958 while (count) {
c3021629
MS
959 if (cs->write && cs->pipebufs && page) {
960 return fuse_ref_page(cs, page, offset, count);
961 } else if (!cs->len) {
ce534fb0
MS
962 if (cs->move_pages && page &&
963 offset == 0 && count == PAGE_SIZE) {
964 err = fuse_try_move_page(cs, pagep);
965 if (err <= 0)
966 return err;
967 } else {
968 err = fuse_copy_fill(cs);
969 if (err)
970 return err;
971 }
1729a16c 972 }
334f485d 973 if (page) {
2408f6ef 974 void *mapaddr = kmap_atomic(page);
334f485d
MS
975 void *buf = mapaddr + offset;
976 offset += fuse_copy_do(cs, &buf, &count);
2408f6ef 977 kunmap_atomic(mapaddr);
334f485d
MS
978 } else
979 offset += fuse_copy_do(cs, NULL, &count);
980 }
981 if (page && !cs->write)
982 flush_dcache_page(page);
983 return 0;
984}
985
986/* Copy pages in the request to/from userspace buffer */
987static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
988 int zeroing)
989{
990 unsigned i;
991 struct fuse_req *req = cs->req;
334f485d
MS
992
993 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
ce534fb0 994 int err;
85f40aec
MP
995 unsigned offset = req->page_descs[i].offset;
996 unsigned count = min(nbytes, req->page_descs[i].length);
ce534fb0
MS
997
998 err = fuse_copy_page(cs, &req->pages[i], offset, count,
999 zeroing);
334f485d
MS
1000 if (err)
1001 return err;
1002
1003 nbytes -= count;
334f485d
MS
1004 }
1005 return 0;
1006}
1007
1008/* Copy a single argument in the request to/from userspace buffer */
1009static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1010{
1011 while (size) {
1729a16c
MS
1012 if (!cs->len) {
1013 int err = fuse_copy_fill(cs);
1014 if (err)
1015 return err;
1016 }
334f485d
MS
1017 fuse_copy_do(cs, &val, &size);
1018 }
1019 return 0;
1020}
1021
1022/* Copy request arguments to/from userspace buffer */
1023static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1024 unsigned argpages, struct fuse_arg *args,
1025 int zeroing)
1026{
1027 int err = 0;
1028 unsigned i;
1029
1030 for (i = 0; !err && i < numargs; i++) {
1031 struct fuse_arg *arg = &args[i];
1032 if (i == numargs - 1 && argpages)
1033 err = fuse_copy_pages(cs, arg->size, zeroing);
1034 else
1035 err = fuse_copy_one(cs, arg->value, arg->size);
1036 }
1037 return err;
1038}
1039
f88996a9 1040static int forget_pending(struct fuse_iqueue *fiq)
07e77dca 1041{
f88996a9 1042 return fiq->forget_list_head.next != NULL;
07e77dca
MS
1043}
1044
f88996a9 1045static int request_pending(struct fuse_iqueue *fiq)
a4d27e75 1046{
f88996a9
MS
1047 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1048 forget_pending(fiq);
a4d27e75
MS
1049}
1050
a4d27e75
MS
1051/*
1052 * Transfer an interrupt request to userspace
1053 *
 1054 * Unlike other requests, this one is assembled on demand, without a need
1055 * to allocate a separate fuse_req structure.
1056 *
fd22d62e 1057 * Called with fiq->waitq.lock held, releases it
a4d27e75 1058 */
fd22d62e
MS
1059static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1060 struct fuse_copy_state *cs,
c3021629 1061 size_t nbytes, struct fuse_req *req)
fd22d62e 1062__releases(fiq->waitq.lock)
a4d27e75 1063{
a4d27e75
MS
1064 struct fuse_in_header ih;
1065 struct fuse_interrupt_in arg;
1066 unsigned reqsize = sizeof(ih) + sizeof(arg);
1067 int err;
1068
1069 list_del_init(&req->intr_entry);
4ce60812 1070 req->intr_unique = fuse_get_unique(fiq);
a4d27e75
MS
1071 memset(&ih, 0, sizeof(ih));
1072 memset(&arg, 0, sizeof(arg));
1073 ih.len = reqsize;
1074 ih.opcode = FUSE_INTERRUPT;
1075 ih.unique = req->intr_unique;
1076 arg.unique = req->in.h.unique;
1077
4ce60812 1078 spin_unlock(&fiq->waitq.lock);
c3021629 1079 if (nbytes < reqsize)
a4d27e75
MS
1080 return -EINVAL;
1081
c3021629 1082 err = fuse_copy_one(cs, &ih, sizeof(ih));
a4d27e75 1083 if (!err)
c3021629
MS
1084 err = fuse_copy_one(cs, &arg, sizeof(arg));
1085 fuse_copy_finish(cs);
a4d27e75
MS
1086
1087 return err ? err : reqsize;
1088}
1089
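/*
 * Detach up to 'max' queued forgets from the input queue and return them
 * as a singly linked list; *countp (if non-NULL) receives the count.
 */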
f88996a9 1090static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
02c048b9
MS
1091 unsigned max,
1092 unsigned *countp)
07e77dca 1093{
f88996a9 1094 struct fuse_forget_link *head = fiq->forget_list_head.next;
02c048b9
MS
1095 struct fuse_forget_link **newhead = &head;
1096 unsigned count;
07e77dca 1097
02c048b9
MS
1098 for (count = 0; *newhead != NULL && count < max; count++)
1099 newhead = &(*newhead)->next;
1100
f88996a9 1101 fiq->forget_list_head.next = *newhead;
02c048b9 1102 *newhead = NULL;
f88996a9
MS
1103 if (fiq->forget_list_head.next == NULL)
1104 fiq->forget_list_tail = &fiq->forget_list_head;
07e77dca 1105
02c048b9
MS
1106 if (countp != NULL)
1107 *countp = count;
1108
1109 return head;
07e77dca
MS
1110}
1111
fd22d62e 1112static int fuse_read_single_forget(struct fuse_iqueue *fiq,
07e77dca
MS
1113 struct fuse_copy_state *cs,
1114 size_t nbytes)
fd22d62e 1115__releases(fiq->waitq.lock)
07e77dca
MS
1116{
1117 int err;
f88996a9 1118 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
07e77dca 1119 struct fuse_forget_in arg = {
02c048b9 1120 .nlookup = forget->forget_one.nlookup,
07e77dca
MS
1121 };
1122 struct fuse_in_header ih = {
1123 .opcode = FUSE_FORGET,
02c048b9 1124 .nodeid = forget->forget_one.nodeid,
f88996a9 1125 .unique = fuse_get_unique(fiq),
07e77dca
MS
1126 .len = sizeof(ih) + sizeof(arg),
1127 };
1128
4ce60812 1129 spin_unlock(&fiq->waitq.lock);
07e77dca
MS
1130 kfree(forget);
1131 if (nbytes < ih.len)
1132 return -EINVAL;
1133
1134 err = fuse_copy_one(cs, &ih, sizeof(ih));
1135 if (!err)
1136 err = fuse_copy_one(cs, &arg, sizeof(arg));
1137 fuse_copy_finish(cs);
1138
1139 if (err)
1140 return err;
1141
1142 return ih.len;
1143}
1144
fd22d62e 1145static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
02c048b9 1146 struct fuse_copy_state *cs, size_t nbytes)
fd22d62e 1147__releases(fiq->waitq.lock)
02c048b9
MS
1148{
1149 int err;
1150 unsigned max_forgets;
1151 unsigned count;
1152 struct fuse_forget_link *head;
1153 struct fuse_batch_forget_in arg = { .count = 0 };
1154 struct fuse_in_header ih = {
1155 .opcode = FUSE_BATCH_FORGET,
f88996a9 1156 .unique = fuse_get_unique(fiq),
02c048b9
MS
1157 .len = sizeof(ih) + sizeof(arg),
1158 };
1159
1160 if (nbytes < ih.len) {
4ce60812 1161 spin_unlock(&fiq->waitq.lock);
02c048b9
MS
1162 return -EINVAL;
1163 }
1164
1165 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
f88996a9 1166 head = dequeue_forget(fiq, max_forgets, &count);
4ce60812 1167 spin_unlock(&fiq->waitq.lock);
02c048b9
MS
1168
1169 arg.count = count;
1170 ih.len += count * sizeof(struct fuse_forget_one);
1171 err = fuse_copy_one(cs, &ih, sizeof(ih));
1172 if (!err)
1173 err = fuse_copy_one(cs, &arg, sizeof(arg));
1174
1175 while (head) {
1176 struct fuse_forget_link *forget = head;
1177
1178 if (!err) {
1179 err = fuse_copy_one(cs, &forget->forget_one,
1180 sizeof(forget->forget_one));
1181 }
1182 head = forget->next;
1183 kfree(forget);
1184 }
1185
1186 fuse_copy_finish(cs);
1187
1188 if (err)
1189 return err;
1190
1191 return ih.len;
1192}
1193
fd22d62e
MS
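/*
 * Emit queued forgets either one at a time or batched: batching requires
 * protocol minor version 16 or later and more than one pending forget.
 */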
1194static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1195 struct fuse_copy_state *cs,
02c048b9 1196 size_t nbytes)
fd22d62e 1197__releases(fiq->waitq.lock)
02c048b9 1198{
f88996a9 1199 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
fd22d62e 1200 return fuse_read_single_forget(fiq, cs, nbytes);
02c048b9 1201 else
fd22d62e 1202 return fuse_read_batch_forget(fiq, cs, nbytes);
02c048b9
MS
1203}
1204
334f485d
MS
1205/*
1206 * Read a single request into the userspace filesystem's buffer. This
1207 * function waits until a request is available, then removes it from
 1208 * the pending list and copies request data to the userspace buffer. If
f9a2842e
MS
 1209 * no reply is needed (FORGET), or the request has been aborted, or there
 1210 * was an error during the copying, then it is finished by calling
334f485d
MS
1211 * request_end(). Otherwise add it to the processing list, and set
1212 * the 'sent' flag.
1213 */
c3696046 1214static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
c3021629 1215 struct fuse_copy_state *cs, size_t nbytes)
334f485d 1216{
82cbdcd3 1217 ssize_t err;
c3696046 1218 struct fuse_conn *fc = fud->fc;
f88996a9 1219 struct fuse_iqueue *fiq = &fc->iq;
c3696046 1220 struct fuse_pqueue *fpq = &fud->pq;
334f485d
MS
1221 struct fuse_req *req;
1222 struct fuse_in *in;
334f485d
MS
1223 unsigned reqsize;
1224
1d3d752b 1225 restart:
4ce60812 1226 spin_lock(&fiq->waitq.lock);
e5ac1d1e 1227 err = -EAGAIN;
e16714d8 1228 if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
f88996a9 1229 !request_pending(fiq))
e5ac1d1e
JD
1230 goto err_unlock;
1231
5250921b
MS
1232 err = wait_event_interruptible_exclusive_locked(fiq->waitq,
1233 !fiq->connected || request_pending(fiq));
1234 if (err)
1235 goto err_unlock;
1236
334f485d 1237 err = -ENODEV;
e16714d8 1238 if (!fiq->connected)
334f485d 1239 goto err_unlock;
334f485d 1240
f88996a9
MS
1241 if (!list_empty(&fiq->interrupts)) {
1242 req = list_entry(fiq->interrupts.next, struct fuse_req,
a4d27e75 1243 intr_entry);
fd22d62e 1244 return fuse_read_interrupt(fiq, cs, nbytes, req);
a4d27e75
MS
1245 }
1246
f88996a9
MS
1247 if (forget_pending(fiq)) {
1248 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
fd22d62e 1249 return fuse_read_forget(fc, fiq, cs, nbytes);
07e77dca 1250
f88996a9
MS
1251 if (fiq->forget_batch <= -8)
1252 fiq->forget_batch = 16;
07e77dca
MS
1253 }
1254
f88996a9 1255 req = list_entry(fiq->pending.next, struct fuse_req, list);
33e14b4d 1256 clear_bit(FR_PENDING, &req->flags);
ef759258 1257 list_del_init(&req->list);
4ce60812
MS
1258 spin_unlock(&fiq->waitq.lock);
1259
334f485d 1260 in = &req->in;
1d3d752b
MS
1261 reqsize = in->h.len;
1262 /* If request is too large, reply with an error and restart the read */
c3021629 1263 if (nbytes < reqsize) {
1d3d752b
MS
1264 req->out.h.error = -EIO;
1265 /* SETXATTR is special, since it may contain too large data */
1266 if (in->h.opcode == FUSE_SETXATTR)
1267 req->out.h.error = -E2BIG;
1268 request_end(fc, req);
1269 goto restart;
334f485d 1270 }
45a91cb1 1271 spin_lock(&fpq->lock);
82cbdcd3 1272 list_add(&req->list, &fpq->io);
45a91cb1 1273 spin_unlock(&fpq->lock);
c3021629
MS
1274 cs->req = req;
1275 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1d3d752b 1276 if (!err)
c3021629 1277 err = fuse_copy_args(cs, in->numargs, in->argpages,
1d3d752b 1278 (struct fuse_arg *) in->args, 0);
c3021629 1279 fuse_copy_finish(cs);
45a91cb1 1280 spin_lock(&fpq->lock);
825d6d33 1281 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1282 if (!fpq->connected) {
82cbdcd3
MS
1283 err = -ENODEV;
1284 goto out_end;
c9c9d7df 1285 }
334f485d 1286 if (err) {
c9c9d7df 1287 req->out.h.error = -EIO;
82cbdcd3 1288 goto out_end;
334f485d 1289 }
825d6d33 1290 if (!test_bit(FR_ISREPLY, &req->flags)) {
82cbdcd3
MS
1291 err = reqsize;
1292 goto out_end;
334f485d 1293 }
82cbdcd3 1294 list_move_tail(&req->list, &fpq->processing);
45a91cb1 1295 spin_unlock(&fpq->lock);
82cbdcd3
MS
1296 set_bit(FR_SENT, &req->flags);
1297 /* matches barrier in request_wait_answer() */
1298 smp_mb__after_atomic();
1299 if (test_bit(FR_INTERRUPTED, &req->flags))
1300 queue_interrupt(fiq, req);
82cbdcd3 1301
334f485d
MS
1302 return reqsize;
1303
82cbdcd3 1304out_end:
77cd9d48
MS
1305 if (!test_bit(FR_PRIVATE, &req->flags))
1306 list_del_init(&req->list);
45a91cb1 1307 spin_unlock(&fpq->lock);
82cbdcd3
MS
1308 request_end(fc, req);
1309 return err;
1310
334f485d 1311 err_unlock:
4ce60812 1312 spin_unlock(&fiq->waitq.lock);
334f485d
MS
1313 return err;
1314}
1315
94e4fe2c
TVB
1316static int fuse_dev_open(struct inode *inode, struct file *file)
1317{
1318 /*
 1319 * The fuse device file's private_data is used to hold
1320 * the fuse_conn(ection) when it is mounted, and is used to
1321 * keep track of whether the file has been mounted already.
1322 */
1323 file->private_data = NULL;
1324 return 0;
1325}
1326
fbdbacca 1327static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
c3021629
MS
1328{
1329 struct fuse_copy_state cs;
1330 struct file *file = iocb->ki_filp;
cc080e9e
MS
1331 struct fuse_dev *fud = fuse_get_dev(file);
1332
1333 if (!fud)
c3021629
MS
1334 return -EPERM;
1335
fbdbacca
AV
1336 if (!iter_is_iovec(to))
1337 return -EINVAL;
1338
dc00809a 1339 fuse_copy_init(&cs, 1, to);
c3021629 1340
c3696046 1341 return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
c3021629
MS
1342}
1343
c3021629
MS
1344static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1345 struct pipe_inode_info *pipe,
1346 size_t len, unsigned int flags)
1347{
d82718e3 1348 int total, ret;
c3021629 1349 int page_nr = 0;
c3021629
MS
1350 struct pipe_buffer *bufs;
1351 struct fuse_copy_state cs;
cc080e9e
MS
1352 struct fuse_dev *fud = fuse_get_dev(in);
1353
1354 if (!fud)
c3021629
MS
1355 return -EPERM;
1356
07e77dca 1357 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
c3021629
MS
1358 if (!bufs)
1359 return -ENOMEM;
1360
dc00809a 1361 fuse_copy_init(&cs, 1, NULL);
c3021629
MS
1362 cs.pipebufs = bufs;
1363 cs.pipe = pipe;
c3696046 1364 ret = fuse_dev_do_read(fud, in, &cs, len);
c3021629
MS
1365 if (ret < 0)
1366 goto out;
1367
c3021629
MS
1368 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1369 ret = -EIO;
d82718e3 1370 goto out;
c3021629
MS
1371 }
1372
d82718e3 1373 for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
28a625cb
MS
1374 /*
1375 * Need to be careful about this. Having buf->ops in module
1376 * code can Oops if the buffer persists after module unload.
1377 */
d82718e3
AV
1378 bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1379 ret = add_to_pipe(pipe, &bufs[page_nr++]);
1380 if (unlikely(ret < 0))
1381 break;
c3021629 1382 }
d82718e3
AV
1383 if (total)
1384 ret = total;
c3021629
MS
1385out:
1386 for (; page_nr < cs.nr_segs; page_nr++)
09cbfeaf 1387 put_page(bufs[page_nr].page);
c3021629
MS
1388
1389 kfree(bufs);
1390 return ret;
1391}
1392
95668a69
TH
1393static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1394 struct fuse_copy_state *cs)
1395{
1396 struct fuse_notify_poll_wakeup_out outarg;
f6d47a17 1397 int err = -EINVAL;
95668a69
TH
1398
1399 if (size != sizeof(outarg))
f6d47a17 1400 goto err;
95668a69
TH
1401
1402 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1403 if (err)
f6d47a17 1404 goto err;
95668a69 1405
f6d47a17 1406 fuse_copy_finish(cs);
95668a69 1407 return fuse_notify_poll_wakeup(fc, &outarg);
f6d47a17
MS
1408
1409err:
1410 fuse_copy_finish(cs);
1411 return err;
95668a69
TH
1412}
1413
3b463ae0
JM
1414static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1415 struct fuse_copy_state *cs)
1416{
1417 struct fuse_notify_inval_inode_out outarg;
1418 int err = -EINVAL;
1419
1420 if (size != sizeof(outarg))
1421 goto err;
1422
1423 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1424 if (err)
1425 goto err;
1426 fuse_copy_finish(cs);
1427
1428 down_read(&fc->killsb);
1429 err = -ENOENT;
b21dda43
MS
1430 if (fc->sb) {
1431 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1432 outarg.off, outarg.len);
1433 }
3b463ae0
JM
1434 up_read(&fc->killsb);
1435 return err;
1436
1437err:
1438 fuse_copy_finish(cs);
1439 return err;
1440}
1441
1442static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1443 struct fuse_copy_state *cs)
1444{
1445 struct fuse_notify_inval_entry_out outarg;
b2d82ee3
FW
1446 int err = -ENOMEM;
1447 char *buf;
3b463ae0
JM
1448 struct qstr name;
1449
b2d82ee3
FW
1450 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1451 if (!buf)
1452 goto err;
1453
1454 err = -EINVAL;
3b463ae0
JM
1455 if (size < sizeof(outarg))
1456 goto err;
1457
1458 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1459 if (err)
1460 goto err;
1461
1462 err = -ENAMETOOLONG;
1463 if (outarg.namelen > FUSE_NAME_MAX)
1464 goto err;
1465
c2183d1e
MS
1466 err = -EINVAL;
1467 if (size != sizeof(outarg) + outarg.namelen + 1)
1468 goto err;
1469
3b463ae0
JM
1470 name.name = buf;
1471 name.len = outarg.namelen;
1472 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1473 if (err)
1474 goto err;
1475 fuse_copy_finish(cs);
1476 buf[outarg.namelen] = 0;
3b463ae0
JM
1477
1478 down_read(&fc->killsb);
1479 err = -ENOENT;
b21dda43 1480 if (fc->sb)
451d0f59
JM
1481 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1482 up_read(&fc->killsb);
1483 kfree(buf);
1484 return err;
1485
1486err:
1487 kfree(buf);
1488 fuse_copy_finish(cs);
1489 return err;
1490}
1491
1492static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1493 struct fuse_copy_state *cs)
1494{
1495 struct fuse_notify_delete_out outarg;
1496 int err = -ENOMEM;
1497 char *buf;
1498 struct qstr name;
1499
1500 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1501 if (!buf)
1502 goto err;
1503
1504 err = -EINVAL;
1505 if (size < sizeof(outarg))
1506 goto err;
1507
1508 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1509 if (err)
1510 goto err;
1511
1512 err = -ENAMETOOLONG;
1513 if (outarg.namelen > FUSE_NAME_MAX)
1514 goto err;
1515
1516 err = -EINVAL;
1517 if (size != sizeof(outarg) + outarg.namelen + 1)
1518 goto err;
1519
1520 name.name = buf;
1521 name.len = outarg.namelen;
1522 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1523 if (err)
1524 goto err;
1525 fuse_copy_finish(cs);
1526 buf[outarg.namelen] = 0;
451d0f59
JM
1527
1528 down_read(&fc->killsb);
1529 err = -ENOENT;
1530 if (fc->sb)
1531 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1532 outarg.child, &name);
3b463ae0 1533 up_read(&fc->killsb);
b2d82ee3 1534 kfree(buf);
3b463ae0
JM
1535 return err;
1536
1537err:
b2d82ee3 1538 kfree(buf);
3b463ae0
JM
1539 fuse_copy_finish(cs);
1540 return err;
1541}
1542
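/*
 * FUSE_NOTIFY_STORE: copy data supplied by the server into the page
 * cache of the inode identified by outarg.nodeid, extending the file
 * size if needed.
 */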
a1d75f25
MS
1543static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1544 struct fuse_copy_state *cs)
1545{
1546 struct fuse_notify_store_out outarg;
1547 struct inode *inode;
1548 struct address_space *mapping;
1549 u64 nodeid;
1550 int err;
1551 pgoff_t index;
1552 unsigned int offset;
1553 unsigned int num;
1554 loff_t file_size;
1555 loff_t end;
1556
1557 err = -EINVAL;
1558 if (size < sizeof(outarg))
1559 goto out_finish;
1560
1561 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1562 if (err)
1563 goto out_finish;
1564
1565 err = -EINVAL;
1566 if (size - sizeof(outarg) != outarg.size)
1567 goto out_finish;
1568
1569 nodeid = outarg.nodeid;
1570
1571 down_read(&fc->killsb);
1572
1573 err = -ENOENT;
1574 if (!fc->sb)
1575 goto out_up_killsb;
1576
1577 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1578 if (!inode)
1579 goto out_up_killsb;
1580
1581 mapping = inode->i_mapping;
09cbfeaf
KS
1582 index = outarg.offset >> PAGE_SHIFT;
1583 offset = outarg.offset & ~PAGE_MASK;
a1d75f25
MS
1584 file_size = i_size_read(inode);
1585 end = outarg.offset + outarg.size;
1586 if (end > file_size) {
1587 file_size = end;
1588 fuse_write_update_size(inode, file_size);
1589 }
1590
1591 num = outarg.size;
1592 while (num) {
1593 struct page *page;
1594 unsigned int this_num;
1595
1596 err = -ENOMEM;
1597 page = find_or_create_page(mapping, index,
1598 mapping_gfp_mask(mapping));
1599 if (!page)
1600 goto out_iput;
1601
09cbfeaf 1602 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
a1d75f25 1603 err = fuse_copy_page(cs, &page, offset, this_num, 0);
063ec1e5 1604 if (!err && offset == 0 &&
09cbfeaf 1605 (this_num == PAGE_SIZE || file_size == end))
a1d75f25
MS
1606 SetPageUptodate(page);
1607 unlock_page(page);
09cbfeaf 1608 put_page(page);
a1d75f25
MS
1609
1610 if (err)
1611 goto out_iput;
1612
1613 num -= this_num;
1614 offset = 0;
1615 index++;
1616 }
1617
1618 err = 0;
1619
1620out_iput:
1621 iput(inode);
1622out_up_killsb:
1623 up_read(&fc->killsb);
1624out_finish:
1625 fuse_copy_finish(cs);
1626 return err;
1627}
1628
2d45ba38
MS
1629static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1630{
b745bc85 1631 release_pages(req->pages, req->num_pages, false);
2d45ba38
MS
1632}
1633
1634static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1635 struct fuse_notify_retrieve_out *outarg)
1636{
1637 int err;
1638 struct address_space *mapping = inode->i_mapping;
1639 struct fuse_req *req;
1640 pgoff_t index;
1641 loff_t file_size;
1642 unsigned int num;
1643 unsigned int offset;
0157443c 1644 size_t total_len = 0;
4d53dc99 1645 int num_pages;
2d45ba38 1646
09cbfeaf 1647 offset = outarg->offset & ~PAGE_MASK;
4d53dc99
MP
1648 file_size = i_size_read(inode);
1649
1650 num = outarg->size;
1651 if (outarg->offset > file_size)
1652 num = 0;
1653 else if (outarg->offset + num > file_size)
1654 num = file_size - outarg->offset;
1655
1656 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1657 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1658
1659 req = fuse_get_req(fc, num_pages);
2d45ba38
MS
1660 if (IS_ERR(req))
1661 return PTR_ERR(req);
1662
2d45ba38
MS
1663 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1664 req->in.h.nodeid = outarg->nodeid;
1665 req->in.numargs = 2;
1666 req->in.argpages = 1;
b2430d75 1667 req->page_descs[0].offset = offset;
2d45ba38
MS
1668 req->end = fuse_retrieve_end;
1669
09cbfeaf 1670 index = outarg->offset >> PAGE_SHIFT;
2d45ba38 1671
4d53dc99 1672 while (num && req->num_pages < num_pages) {
2d45ba38
MS
1673 struct page *page;
1674 unsigned int this_num;
1675
1676 page = find_get_page(mapping, index);
1677 if (!page)
1678 break;
1679
09cbfeaf 1680 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
2d45ba38 1681 req->pages[req->num_pages] = page;
85f40aec 1682 req->page_descs[req->num_pages].length = this_num;
2d45ba38
MS
1683 req->num_pages++;
1684
c9e67d48 1685 offset = 0;
2d45ba38
MS
1686 num -= this_num;
1687 total_len += this_num;
48706d0a 1688 index++;
2d45ba38
MS
1689 }
1690 req->misc.retrieve_in.offset = outarg->offset;
1691 req->misc.retrieve_in.size = total_len;
1692 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1693 req->in.args[0].value = &req->misc.retrieve_in;
1694 req->in.args[1].size = total_len;
1695
1696 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1697 if (err)
1698 fuse_retrieve_end(fc, req);
1699
1700 return err;
1701}
1702
1703static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1704 struct fuse_copy_state *cs)
1705{
1706 struct fuse_notify_retrieve_out outarg;
1707 struct inode *inode;
1708 int err;
1709
1710 err = -EINVAL;
1711 if (size != sizeof(outarg))
1712 goto copy_finish;
1713
1714 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1715 if (err)
1716 goto copy_finish;
1717
1718 fuse_copy_finish(cs);
1719
1720 down_read(&fc->killsb);
1721 err = -ENOENT;
1722 if (fc->sb) {
1723 u64 nodeid = outarg.nodeid;
1724
1725 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1726 if (inode) {
1727 err = fuse_retrieve(fc, inode, &outarg);
1728 iput(inode);
1729 }
1730 }
1731 up_read(&fc->killsb);
1732
1733 return err;
1734
1735copy_finish:
1736 fuse_copy_finish(cs);
1737 return err;
1738}
1739
8599396b
TH
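/*
 * Dispatch an unsolicited notification message (zero unique id) from the
 * server to the appropriate handler.
 */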
1740static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1741 unsigned int size, struct fuse_copy_state *cs)
1742{
0d278362
MS
1743 /* Don't try to move pages (yet) */
1744 cs->move_pages = 0;
1745
8599396b 1746 switch (code) {
95668a69
TH
1747 case FUSE_NOTIFY_POLL:
1748 return fuse_notify_poll(fc, size, cs);
1749
3b463ae0
JM
1750 case FUSE_NOTIFY_INVAL_INODE:
1751 return fuse_notify_inval_inode(fc, size, cs);
1752
1753 case FUSE_NOTIFY_INVAL_ENTRY:
1754 return fuse_notify_inval_entry(fc, size, cs);
1755
a1d75f25
MS
1756 case FUSE_NOTIFY_STORE:
1757 return fuse_notify_store(fc, size, cs);
1758
2d45ba38
MS
1759 case FUSE_NOTIFY_RETRIEVE:
1760 return fuse_notify_retrieve(fc, size, cs);
1761
451d0f59
JM
1762 case FUSE_NOTIFY_DELETE:
1763 return fuse_notify_delete(fc, size, cs);
1764
8599396b 1765 default:
f6d47a17 1766 fuse_copy_finish(cs);
8599396b
TH
1767 return -EINVAL;
1768 }
1769}
1770
334f485d 1771/* Look up request on processing list by unique ID */
3a2b5b9c 1772static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
334f485d 1773{
05726aca 1774 struct fuse_req *req;
334f485d 1775
3a2b5b9c 1776 list_for_each_entry(req, &fpq->processing, list) {
a4d27e75 1777 if (req->in.h.unique == unique || req->intr_unique == unique)
334f485d
MS
1778 return req;
1779 }
1780 return NULL;
1781}
1782
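/*
 * Copy reply arguments from the userspace buffer into the request,
 * checking the advertised length against the expected argument sizes.
 * The last argument may be shorter than expected if out->argvar is set.
 */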
1783static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1784 unsigned nbytes)
1785{
1786 unsigned reqsize = sizeof(struct fuse_out_header);
1787
1788 if (out->h.error)
1789 return nbytes != reqsize ? -EINVAL : 0;
1790
1791 reqsize += len_args(out->numargs, out->args);
1792
1793 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1794 return -EINVAL;
1795 else if (reqsize > nbytes) {
1796 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1797 unsigned diffsize = reqsize - nbytes;
1798 if (diffsize > lastarg->size)
1799 return -EINVAL;
1800 lastarg->size -= diffsize;
1801 }
1802 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1803 out->page_zeroing);
1804}
1805
1806/*
1807 * Write a single reply to a request. First the header is copied from
 1808 * the write buffer. The request is then looked up on the processing
 1809 * list by the unique ID found in the header. If found, it is removed
 1810 * from the list and the rest of the buffer is copied into the request.
 1811 * The request is finished by calling request_end().
1812 */
c3696046 1813static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
dd3bb14f 1814 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1815{
1816 int err;
c3696046
MS
1817 struct fuse_conn *fc = fud->fc;
1818 struct fuse_pqueue *fpq = &fud->pq;
334f485d
MS
1819 struct fuse_req *req;
1820 struct fuse_out_header oh;
334f485d 1821
334f485d
MS
1822 if (nbytes < sizeof(struct fuse_out_header))
1823 return -EINVAL;
1824
dd3bb14f 1825 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d
MS
1826 if (err)
1827 goto err_finish;
8599396b
TH
1828
1829 err = -EINVAL;
1830 if (oh.len != nbytes)
1831 goto err_finish;
1832
1833 /*
 1834 * Zero oh.unique indicates an unsolicited notification message
 1835 * and oh.error contains the notification code.
1836 */
1837 if (!oh.unique) {
dd3bb14f 1838 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
8599396b
TH
1839 return err ? err : nbytes;
1840 }
1841
334f485d 1842 err = -EINVAL;
8599396b 1843 if (oh.error <= -1000 || oh.error > 0)
334f485d
MS
1844 goto err_finish;
1845
45a91cb1 1846 spin_lock(&fpq->lock);
69a53bf2 1847 err = -ENOENT;
e96edd94 1848 if (!fpq->connected)
45a91cb1 1849 goto err_unlock_pq;
69a53bf2 1850
3a2b5b9c 1851 req = request_find(fpq, oh.unique);
334f485d 1852 if (!req)
45a91cb1 1853 goto err_unlock_pq;
334f485d 1854
a4d27e75
MS
1855 /* Is it an interrupt reply? */
1856 if (req->intr_unique == oh.unique) {
45a91cb1
MS
1857 spin_unlock(&fpq->lock);
1858
a4d27e75
MS
1859 err = -EINVAL;
1860 if (nbytes != sizeof(struct fuse_out_header))
46c34a34 1861 goto err_finish;
a4d27e75
MS
1862
1863 if (oh.error == -ENOSYS)
1864 fc->no_interrupt = 1;
1865 else if (oh.error == -EAGAIN)
f88996a9 1866 queue_interrupt(&fc->iq, req);
a4d27e75 1867
dd3bb14f 1868 fuse_copy_finish(cs);
a4d27e75
MS
1869 return nbytes;
1870 }
1871
33e14b4d 1872 clear_bit(FR_SENT, &req->flags);
3a2b5b9c 1873 list_move(&req->list, &fpq->io);
334f485d 1874 req->out.h = oh;
825d6d33 1875 set_bit(FR_LOCKED, &req->flags);
45a91cb1 1876 spin_unlock(&fpq->lock);
dd3bb14f 1877 cs->req = req;
ce534fb0
MS
1878 if (!req->out.page_replace)
1879 cs->move_pages = 0;
334f485d 1880
dd3bb14f
MS
1881 err = copy_out_args(cs, &req->out, nbytes);
1882 fuse_copy_finish(cs);
334f485d 1883
45a91cb1 1884 spin_lock(&fpq->lock);
825d6d33 1885 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1886 if (!fpq->connected)
0d8e84b0
MS
1887 err = -ENOENT;
1888 else if (err)
334f485d 1889 req->out.h.error = -EIO;
77cd9d48
MS
1890 if (!test_bit(FR_PRIVATE, &req->flags))
1891 list_del_init(&req->list);
45a91cb1 1892 spin_unlock(&fpq->lock);
46c34a34 1893
334f485d
MS
1894 request_end(fc, req);
1895
1896 return err ? err : nbytes;
1897
45a91cb1
MS
1898 err_unlock_pq:
1899 spin_unlock(&fpq->lock);
334f485d 1900 err_finish:
dd3bb14f 1901 fuse_copy_finish(cs);
334f485d
MS
1902 return err;
1903}
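A reply reaches fuse_dev_do_write() as a single write of a struct fuse_out_header followed by the argument payload, with oh.unique echoing the unique ID of the request being answered and oh.len equal to the total number of bytes written. A minimal userspace sketch of such a reply (hypothetical daemon code, not part of this file; the helper name, fuse_fd descriptor and req_unique value are assumptions, error handling omitted):

#include <linux/fuse.h>
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

/* Hypothetical helper: answer one request with a fuse_entry_out payload. */
static int reply_entry(int fuse_fd, uint64_t req_unique,
		       const struct fuse_entry_out *entry)
{
	struct fuse_out_header oh = {
		.len    = sizeof(oh) + sizeof(*entry),	/* must match the write size */
		.error  = 0,				/* 0 on success, -errno on failure */
		.unique = req_unique,			/* copied from fuse_in_header.unique */
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh,		.iov_len = sizeof(oh) },
		{ .iov_base = (void *)entry,	.iov_len = sizeof(*entry) },
	};

	return writev(fuse_fd, iov, 2) < 0 ? -1 : 0;
}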
1904
fbdbacca 1905static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
dd3bb14f
MS
1906{
1907 struct fuse_copy_state cs;
cc080e9e
MS
1908 struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1909
1910 if (!fud)
dd3bb14f
MS
1911 return -EPERM;
1912
fbdbacca
AV
1913 if (!iter_is_iovec(from))
1914 return -EINVAL;
1915
dc00809a 1916 fuse_copy_init(&cs, 0, from);
dd3bb14f 1917
c3696046 1918 return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
dd3bb14f
MS
1919}
1920
1921static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1922 struct file *out, loff_t *ppos,
1923 size_t len, unsigned int flags)
1924{
1925 unsigned nbuf;
1926 unsigned idx;
1927 struct pipe_buffer *bufs;
1928 struct fuse_copy_state cs;
cc080e9e 1929 struct fuse_dev *fud;
dd3bb14f
MS
1930 size_t rem;
1931 ssize_t ret;
1932
cc080e9e
MS
1933 fud = fuse_get_dev(out);
1934 if (!fud)
dd3bb14f
MS
1935 return -EPERM;
1936
07e77dca 1937 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
1938 if (!bufs)
1939 return -ENOMEM;
1940
1941 pipe_lock(pipe);
1942 nbuf = 0;
1943 rem = 0;
1944 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1945 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1946
1947 ret = -EINVAL;
1948 if (rem < len) {
1949 pipe_unlock(pipe);
1950 goto out;
1951 }
1952
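	/*
	 * Collect enough pipe buffers to cover exactly len bytes: full
	 * buffers are moved out of the pipe, and a final partial buffer
	 * is split, taking an extra reference on its page.
	 */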
1953 rem = len;
1954 while (rem) {
1955 struct pipe_buffer *ibuf;
1956 struct pipe_buffer *obuf;
1957
1958 BUG_ON(nbuf >= pipe->buffers);
1959 BUG_ON(!pipe->nrbufs);
1960 ibuf = &pipe->bufs[pipe->curbuf];
1961 obuf = &bufs[nbuf];
1962
1963 if (rem >= ibuf->len) {
1964 *obuf = *ibuf;
1965 ibuf->ops = NULL;
1966 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1967 pipe->nrbufs--;
1968 } else {
7bf2d1df 1969 pipe_buf_get(pipe, ibuf);
dd3bb14f
MS
1970 *obuf = *ibuf;
1971 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1972 obuf->len = rem;
1973 ibuf->offset += obuf->len;
1974 ibuf->len -= obuf->len;
1975 }
1976 nbuf++;
1977 rem -= obuf->len;
1978 }
1979 pipe_unlock(pipe);
1980
dc00809a 1981 fuse_copy_init(&cs, 0, NULL);
dd3bb14f 1982 cs.pipebufs = bufs;
6c09e94a 1983 cs.nr_segs = nbuf;
dd3bb14f
MS
1984 cs.pipe = pipe;
1985
ce534fb0
MS
1986 if (flags & SPLICE_F_MOVE)
1987 cs.move_pages = 1;
1988
c3696046 1989 ret = fuse_dev_do_write(fud, &cs, len);
dd3bb14f 1990
a779638c
MS
1991 for (idx = 0; idx < nbuf; idx++)
1992 pipe_buf_release(pipe, &bufs[idx]);
1993
dd3bb14f
MS
1994out:
1995 kfree(bufs);
1996 return ret;
1997}
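The splice path above exists so a daemon can hand whole pages back to the kernel instead of copying them. A minimal sketch of the userspace side (hypothetical; assumes the complete reply, fuse_out_header plus data, has already been written or vmspliced into a pipe whose read end is pipe_rd):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/*
 * Hypothetical: move reply_len bytes of an assembled reply from the pipe
 * into the fuse device. SPLICE_F_MOVE requests page stealing (cs.move_pages
 * above); the kernel may still fall back to copying.
 */
static ssize_t splice_reply(int pipe_rd, int fuse_fd, size_t reply_len)
{
	return splice(pipe_rd, NULL, fuse_fd, NULL, reply_len, SPLICE_F_MOVE);
}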
1998
334f485d
MS
1999static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2000{
334f485d 2001 unsigned mask = POLLOUT | POLLWRNORM;
f88996a9 2002 struct fuse_iqueue *fiq;
cc080e9e
MS
2003 struct fuse_dev *fud = fuse_get_dev(file);
2004
2005 if (!fud)
7025d9ad 2006 return POLLERR;
334f485d 2007
cc080e9e 2008 fiq = &fud->fc->iq;
f88996a9 2009 poll_wait(file, &fiq->waitq, wait);
334f485d 2010
4ce60812 2011 spin_lock(&fiq->waitq.lock);
e16714d8 2012 if (!fiq->connected)
7025d9ad 2013 mask = POLLERR;
f88996a9 2014 else if (request_pending(fiq))
7025d9ad 2015 mask |= POLLIN | POLLRDNORM;
4ce60812 2016 spin_unlock(&fiq->waitq.lock);
334f485d
MS
2017
2018 return mask;
2019}
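From userspace this maps onto an ordinary event loop: the daemon polls the device for POLLIN and only then reads the next request. A minimal sketch (hypothetical; assumes fuse_fd is the mounted /dev/fuse descriptor and bufsize is at least the negotiated max_write plus header space):

#include <poll.h>
#include <unistd.h>

/* Hypothetical daemon loop step: wait for a request, then read it. */
static ssize_t wait_and_read_request(int fuse_fd, void *buf, size_t bufsize)
{
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (pfd.revents & POLLERR)		/* connection was aborted */
		return -1;
	return read(fuse_fd, buf, bufsize);	/* one request per read */
}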
2020
69a53bf2
MS
2021/*
2022 * Abort all requests on the given list (pending or processing)
2023 *
d7133114 2024 * Called without fc->lock held (see fuse_abort_conn() and fuse_dev_release())
69a53bf2 2025 */
334f485d
MS
2026static void end_requests(struct fuse_conn *fc, struct list_head *head)
2027{
2028 while (!list_empty(head)) {
2029 struct fuse_req *req;
2030 req = list_entry(head->next, struct fuse_req, list);
334f485d 2031 req->out.h.error = -ECONNABORTED;
33e14b4d 2032 clear_bit(FR_SENT, &req->flags);
f377cb79 2033 list_del_init(&req->list);
334f485d 2034 request_end(fc, req);
334f485d
MS
2035 }
2036}
2037
357ccf2b
BG
2038static void end_polls(struct fuse_conn *fc)
2039{
2040 struct rb_node *p;
2041
2042 p = rb_first(&fc->polled_files);
2043
2044 while (p) {
2045 struct fuse_file *ff;
2046 ff = rb_entry(p, struct fuse_file, polled_node);
2047 wake_up_interruptible_all(&ff->poll_wait);
2048
2049 p = rb_next(p);
2050 }
2051}
2052
69a53bf2
MS
2053/*
2054 * Abort all requests.
2055 *
b716d425
MS
2056 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2057 * filesystem.
2058 *
2059 * The same effect is usually achievable through killing the filesystem daemon
2060 * and all users of the filesystem. The exception is the combination of an
2061 * asynchronous request and the tricky deadlock (see
2062 * Documentation/filesystems/fuse.txt).
69a53bf2 2063 *
b716d425
MS
2064 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 2065 * requests; these are finished off immediately. Locked requests will be
 2066 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 2067 * requests. Some of them may complete on their own before we get to them;
 2068 * that is fine: such a request is removed from the list before we touch
 2069 * it.
69a53bf2
MS
2070 */
2071void fuse_abort_conn(struct fuse_conn *fc)
2072{
f88996a9
MS
2073 struct fuse_iqueue *fiq = &fc->iq;
2074
d7133114 2075 spin_lock(&fc->lock);
69a53bf2 2076 if (fc->connected) {
c3696046 2077 struct fuse_dev *fud;
b716d425 2078 struct fuse_req *req, *next;
41f98274
MS
2079 LIST_HEAD(to_end1);
2080 LIST_HEAD(to_end2);
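		/*
		 * to_end1 collects requests currently under I/O that are not
		 * locked (each is finished below with an extra reference
		 * held); to_end2 collects everything still on the processing
		 * and pending queues and is finished via end_requests().
		 */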
b716d425 2081
69a53bf2 2082 fc->connected = 0;
51eb01e7 2083 fc->blocked = 0;
9759bd51 2084 fuse_set_initialized(fc);
c3696046
MS
2085 list_for_each_entry(fud, &fc->devices, entry) {
2086 struct fuse_pqueue *fpq = &fud->pq;
2087
2088 spin_lock(&fpq->lock);
2089 fpq->connected = 0;
2090 list_for_each_entry_safe(req, next, &fpq->io, list) {
2091 req->out.h.error = -ECONNABORTED;
2092 spin_lock(&req->waitq.lock);
2093 set_bit(FR_ABORTED, &req->flags);
2094 if (!test_bit(FR_LOCKED, &req->flags)) {
2095 set_bit(FR_PRIVATE, &req->flags);
2096 list_move(&req->list, &to_end1);
2097 }
2098 spin_unlock(&req->waitq.lock);
77cd9d48 2099 }
c3696046
MS
2100 list_splice_init(&fpq->processing, &to_end2);
2101 spin_unlock(&fpq->lock);
b716d425 2102 }
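		/*
		 * Lift the background limit and flush the bg queue so that
		 * queued background requests are moved to the input queue
		 * and aborted together with the pending requests below.
		 */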
41f98274
MS
2103 fc->max_background = UINT_MAX;
2104 flush_bg_queue(fc);
8c91189a 2105
4ce60812 2106 spin_lock(&fiq->waitq.lock);
8c91189a 2107 fiq->connected = 0;
f88996a9 2108 list_splice_init(&fiq->pending, &to_end2);
a8a86d78
TE
2109 list_for_each_entry(req, &to_end2, list)
2110 clear_bit(FR_PENDING, &req->flags);
8c91189a
MS
2111 while (forget_pending(fiq))
2112 kfree(dequeue_forget(fiq, 1, NULL));
4ce60812
MS
2113 wake_up_all_locked(&fiq->waitq);
2114 spin_unlock(&fiq->waitq.lock);
8c91189a 2115 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
ee314a87
MS
2116 end_polls(fc);
2117 wake_up_all(&fc->blocked_waitq);
2118 spin_unlock(&fc->lock);
8c91189a 2119
41f98274
MS
2120 while (!list_empty(&to_end1)) {
2121 req = list_first_entry(&to_end1, struct fuse_req, list);
b716d425 2122 __fuse_get_request(req);
f377cb79 2123 list_del_init(&req->list);
b716d425 2124 request_end(fc, req);
b716d425 2125 }
41f98274 2126 end_requests(fc, &to_end2);
ee314a87
MS
2127 } else {
2128 spin_unlock(&fc->lock);
69a53bf2 2129 }
69a53bf2 2130}
08cbf542 2131EXPORT_SYMBOL_GPL(fuse_abort_conn);
69a53bf2 2132
08cbf542 2133int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2134{
cc080e9e
MS
2135 struct fuse_dev *fud = fuse_get_dev(file);
2136
2137 if (fud) {
2138 struct fuse_conn *fc = fud->fc;
c3696046
MS
2139 struct fuse_pqueue *fpq = &fud->pq;
2140
2141 WARN_ON(!list_empty(&fpq->io));
2142 end_requests(fc, &fpq->processing);
2143 /* Are we the last open device? */
2144 if (atomic_dec_and_test(&fc->dev_count)) {
2145 WARN_ON(fc->iq.fasync != NULL);
2146 fuse_abort_conn(fc);
2147 }
cc080e9e 2148 fuse_dev_free(fud);
385a17bf 2149 }
334f485d
MS
2150 return 0;
2151}
08cbf542 2152EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2153
385a17bf
JD
2154static int fuse_dev_fasync(int fd, struct file *file, int on)
2155{
cc080e9e
MS
2156 struct fuse_dev *fud = fuse_get_dev(file);
2157
2158 if (!fud)
a87046d8 2159 return -EPERM;
385a17bf
JD
2160
2161 /* No locking - fasync_helper does its own locking */
cc080e9e 2162 return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
385a17bf
JD
2163}
2164
00c570f4
MS
2165static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2166{
cc080e9e
MS
2167 struct fuse_dev *fud;
2168
00c570f4
MS
2169 if (new->private_data)
2170 return -EINVAL;
2171
cc080e9e
MS
2172 fud = fuse_dev_alloc(fc);
2173 if (!fud)
2174 return -ENOMEM;
2175
2176 new->private_data = fud;
c3696046 2177 atomic_inc(&fc->dev_count);
00c570f4
MS
2178
2179 return 0;
2180}
2181
2182static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2183 unsigned long arg)
2184{
2185 int err = -ENOTTY;
2186
2187 if (cmd == FUSE_DEV_IOC_CLONE) {
2188 int oldfd;
2189
2190 err = -EFAULT;
2191 if (!get_user(oldfd, (__u32 __user *) arg)) {
2192 struct file *old = fget(oldfd);
2193
2194 err = -EINVAL;
2195 if (old) {
8ed1f0e2
JH
2196 struct fuse_dev *fud = NULL;
2197
2198 /*
2199 * Check against file->f_op because CUSE
2200 * uses the same ioctl handler.
2201 */
2202 if (old->f_op == file->f_op &&
2203 old->f_cred->user_ns == file->f_cred->user_ns)
2204 fud = fuse_get_dev(old);
00c570f4 2205
cc080e9e 2206 if (fud) {
00c570f4 2207 mutex_lock(&fuse_mutex);
cc080e9e 2208 err = fuse_device_clone(fud->fc, file);
00c570f4
MS
2209 mutex_unlock(&fuse_mutex);
2210 }
2211 fput(old);
2212 }
2213 }
2214 }
2215 return err;
2216}
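FUSE_DEV_IOC_CLONE is how a daemon gets extra queues on the same connection: each worker opens its own /dev/fuse fd and attaches it to the existing session. A minimal userspace sketch (hypothetical helper; assumes session_fd is the descriptor that was passed to mount):

#include <linux/fuse.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical: create one device clone, e.g. per worker thread. */
static int clone_fuse_fd(int session_fd)
{
	uint32_t oldfd = session_fd;
	int newfd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (newfd < 0)
		return -1;
	if (ioctl(newfd, FUSE_DEV_IOC_CLONE, &oldfd) < 0) {	/* attach to the session */
		close(newfd);
		return -1;
	}
	return newfd;	/* worker may now read requests and write replies here */
}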
2217
4b6f5d20 2218const struct file_operations fuse_dev_operations = {
334f485d 2219 .owner = THIS_MODULE,
94e4fe2c 2220 .open = fuse_dev_open,
334f485d 2221 .llseek = no_llseek,
fbdbacca 2222 .read_iter = fuse_dev_read,
c3021629 2223 .splice_read = fuse_dev_splice_read,
fbdbacca 2224 .write_iter = fuse_dev_write,
dd3bb14f 2225 .splice_write = fuse_dev_splice_write,
334f485d
MS
2226 .poll = fuse_dev_poll,
2227 .release = fuse_dev_release,
385a17bf 2228 .fasync = fuse_dev_fasync,
00c570f4
MS
2229 .unlocked_ioctl = fuse_dev_ioctl,
2230 .compat_ioctl = fuse_dev_ioctl,
334f485d 2231};
08cbf542 2232EXPORT_SYMBOL_GPL(fuse_dev_operations);
334f485d
MS
2233
2234static struct miscdevice fuse_miscdevice = {
2235 .minor = FUSE_MINOR,
2236 .name = "fuse",
2237 .fops = &fuse_dev_operations,
2238};
2239
2240int __init fuse_dev_init(void)
2241{
2242 int err = -ENOMEM;
2243 fuse_req_cachep = kmem_cache_create("fuse_request",
2244 sizeof(struct fuse_req),
20c2df83 2245 0, 0, NULL);
334f485d
MS
2246 if (!fuse_req_cachep)
2247 goto out;
2248
2249 err = misc_register(&fuse_miscdevice);
2250 if (err)
2251 goto out_cache_clean;
2252
2253 return 0;
2254
2255 out_cache_clean:
2256 kmem_cache_destroy(fuse_req_cachep);
2257 out:
2258 return err;
2259}
2260
2261void fuse_dev_cleanup(void)
2262{
2263 misc_deregister(&fuse_miscdevice);
2264 kmem_cache_destroy(fuse_req_cachep);
2265}