/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

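/*
 * Reset a request (freshly allocated or recycled) to a clean state: zero the
 * struct and its page arrays, take the initial reference and mark it pending.
 */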
static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

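/*
 * Allocate a request together with its page and page-descriptor arrays.
 * Small requests use the arrays embedded in struct fuse_req; larger ones get
 * separately kmalloc'ed arrays.
 */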
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

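/*
 * Allocate a request and fill in the caller's credentials.  Waits until the
 * connection is initialized (and, for background requests, until it is no
 * longer blocked); a fatal signal aborts the wait.
 */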
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

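/* Allocate the next unique request ID; callers hold fiq->waitq.lock. */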
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

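/*
 * Move queued background requests to the input queue, up to the
 * max_background limit.  Called with fc->lock held.
 */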
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, and the reference to the
 * request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

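/*
 * Queue an interrupt entry for a request and wake up the device reader,
 * unless the request has already finished.
 */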
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

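/*
 * Older protocol versions use smaller argument structures.  Shrink the
 * declared argument sizes so requests and replies are copied with the layout
 * that old userspace daemons expect.
 */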
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

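/*
 * Try to steal the page backing a spliced pipe buffer and install it in the
 * page cache in place of *pagep (the zero-copy SPLICE_F_MOVE path).  Returns
 * 1 to tell the caller to fall back to an ordinary copy.
 */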
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

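/*
 * Detach up to 'max' queued FORGET messages from the input queue's forget
 * list and return them as a singly linked chain; *countp (if non-NULL)
 * receives the number of entries dequeued.  Called with fiq->waitq.lock held.
 */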
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

	if (task_active_pid_ns(current) != fc->pid_ns)
		return -EIO;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

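/*
 * Copy reply arguments from the userspace buffer into the request.  Error
 * replies must consist of the header only; a reply shorter than the declared
 * arguments is accepted only if the request was marked 'argvar', in which
 * case the last argument is truncated to fit.
 */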
1787static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1788 unsigned nbytes)
1789{
1790 unsigned reqsize = sizeof(struct fuse_out_header);
1791
1792 if (out->h.error)
1793 return nbytes != reqsize ? -EINVAL : 0;
1794
1795 reqsize += len_args(out->numargs, out->args);
1796
1797 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1798 return -EINVAL;
1799 else if (reqsize > nbytes) {
1800 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1801 unsigned diffsize = reqsize - nbytes;
1802 if (diffsize > lastarg->size)
1803 return -EINVAL;
1804 lastarg->size -= diffsize;
1805 }
1806 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1807 out->page_zeroing);
1808}
1809
1810/*
1811 * Write a single reply to a request. First the header is copied from
1812 * the write buffer. The request is then searched on the processing
1813 * list by the unique ID found in the header. If found, then remove
1814 * it from the list and copy the rest of the buffer to the request.
1815 * The request is finished by calling request_end()
1816 */
c3696046 1817static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
dd3bb14f 1818 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1819{
1820 int err;
c3696046
MS
1821 struct fuse_conn *fc = fud->fc;
1822 struct fuse_pqueue *fpq = &fud->pq;
334f485d
MS
1823 struct fuse_req *req;
1824 struct fuse_out_header oh;
334f485d 1825
0b6e9ea0
SF
1826 if (task_active_pid_ns(current) != fc->pid_ns)
1827 return -EIO;
1828
334f485d
MS
1829 if (nbytes < sizeof(struct fuse_out_header))
1830 return -EINVAL;
1831
dd3bb14f 1832 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d
MS
1833 if (err)
1834 goto err_finish;
8599396b
TH
1835
1836 err = -EINVAL;
1837 if (oh.len != nbytes)
1838 goto err_finish;
1839
1840 /*
1841 * Zero oh.unique indicates unsolicited notification message
1842 * and error contains notification code.
1843 */
1844 if (!oh.unique) {
dd3bb14f 1845 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
8599396b
TH
1846 return err ? err : nbytes;
1847 }
1848
334f485d 1849 err = -EINVAL;
8599396b 1850 if (oh.error <= -1000 || oh.error > 0)
334f485d
MS
1851 goto err_finish;
1852
45a91cb1 1853 spin_lock(&fpq->lock);
69a53bf2 1854 err = -ENOENT;
e96edd94 1855 if (!fpq->connected)
45a91cb1 1856 goto err_unlock_pq;
69a53bf2 1857
3a2b5b9c 1858 req = request_find(fpq, oh.unique);
334f485d 1859 if (!req)
45a91cb1 1860 goto err_unlock_pq;
334f485d 1861
a4d27e75
MS
1862 /* Is it an interrupt reply? */
1863 if (req->intr_unique == oh.unique) {
45a91cb1
MS
1864 spin_unlock(&fpq->lock);
1865
a4d27e75
MS
1866 err = -EINVAL;
1867 if (nbytes != sizeof(struct fuse_out_header))
46c34a34 1868 goto err_finish;
a4d27e75
MS
1869
1870 if (oh.error == -ENOSYS)
1871 fc->no_interrupt = 1;
1872 else if (oh.error == -EAGAIN)
f88996a9 1873 queue_interrupt(&fc->iq, req);
a4d27e75 1874
dd3bb14f 1875 fuse_copy_finish(cs);
a4d27e75
MS
1876 return nbytes;
1877 }
1878
33e14b4d 1879 clear_bit(FR_SENT, &req->flags);
3a2b5b9c 1880 list_move(&req->list, &fpq->io);
334f485d 1881 req->out.h = oh;
825d6d33 1882 set_bit(FR_LOCKED, &req->flags);
45a91cb1 1883 spin_unlock(&fpq->lock);
dd3bb14f 1884 cs->req = req;
ce534fb0
MS
1885 if (!req->out.page_replace)
1886 cs->move_pages = 0;
334f485d 1887
dd3bb14f
MS
1888 err = copy_out_args(cs, &req->out, nbytes);
1889 fuse_copy_finish(cs);
334f485d 1890
45a91cb1 1891 spin_lock(&fpq->lock);
825d6d33 1892 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1893 if (!fpq->connected)
0d8e84b0
MS
1894 err = -ENOENT;
1895 else if (err)
334f485d 1896 req->out.h.error = -EIO;
77cd9d48
MS
1897 if (!test_bit(FR_PRIVATE, &req->flags))
1898 list_del_init(&req->list);
45a91cb1 1899 spin_unlock(&fpq->lock);
46c34a34 1900
334f485d
MS
1901 request_end(fc, req);
1902
1903 return err ? err : nbytes;
1904
45a91cb1
MS
1905 err_unlock_pq:
1906 spin_unlock(&fpq->lock);
334f485d 1907 err_finish:
dd3bb14f 1908 fuse_copy_finish(cs);
334f485d
MS
1909 return err;
1910}
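
From the daemon's point of view this path is reached by a single write() (or writev()) of a fuse_out_header with unique echoed from the request, error either zero or a negative errno greater than -1000, followed by any output arguments. A minimal sketch of an error-only reply, assuming an open /dev/fuse descriptor; the helper name is illustrative:

/*
 * Daemon-side sketch (userspace, illustrative only): fail a request with a
 * negative errno.  `unique` is copied from the request's fuse_in_header.
 */
#include <errno.h>
#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int reply_error(int fuse_fd, uint64_t unique, int err)
{
	struct fuse_out_header oh;

	memset(&oh, 0, sizeof(oh));
	oh.len = sizeof(oh);	/* header only: error replies carry no arguments */
	oh.error = -err;	/* e.g. err == ENOENT  =>  oh.error == -ENOENT */
	oh.unique = unique;

	return write(fuse_fd, &oh, sizeof(oh)) == (ssize_t)sizeof(oh) ? 0 : -1;
}
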
1911
fbdbacca 1912static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
dd3bb14f
MS
1913{
1914 struct fuse_copy_state cs;
cc080e9e
MS
1915 struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1916
1917 if (!fud)
dd3bb14f
MS
1918 return -EPERM;
1919
fbdbacca
AV
1920 if (!iter_is_iovec(from))
1921 return -EINVAL;
1922
dc00809a 1923 fuse_copy_init(&cs, 0, from);
dd3bb14f 1924
c3696046 1925 return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
dd3bb14f
MS
1926}
1927
1928static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1929 struct file *out, loff_t *ppos,
1930 size_t len, unsigned int flags)
1931{
1932 unsigned nbuf;
1933 unsigned idx;
1934 struct pipe_buffer *bufs;
1935 struct fuse_copy_state cs;
cc080e9e 1936 struct fuse_dev *fud;
dd3bb14f
MS
1937 size_t rem;
1938 ssize_t ret;
1939
cc080e9e
MS
1940 fud = fuse_get_dev(out);
1941 if (!fud)
dd3bb14f
MS
1942 return -EPERM;
1943
07e77dca 1944 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
1945 if (!bufs)
1946 return -ENOMEM;
1947
1948 pipe_lock(pipe);
1949 nbuf = 0;
1950 rem = 0;
1951 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1952 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1953
1954 ret = -EINVAL;
1955 if (rem < len) {
1956 pipe_unlock(pipe);
1957 goto out;
1958 }
1959
1960 rem = len;
1961 while (rem) {
1962 struct pipe_buffer *ibuf;
1963 struct pipe_buffer *obuf;
1964
1965 BUG_ON(nbuf >= pipe->buffers);
1966 BUG_ON(!pipe->nrbufs);
1967 ibuf = &pipe->bufs[pipe->curbuf];
1968 obuf = &bufs[nbuf];
1969
1970 if (rem >= ibuf->len) {
1971 *obuf = *ibuf;
1972 ibuf->ops = NULL;
1973 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1974 pipe->nrbufs--;
1975 } else {
7bf2d1df 1976 pipe_buf_get(pipe, ibuf);
dd3bb14f
MS
1977 *obuf = *ibuf;
1978 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1979 obuf->len = rem;
1980 ibuf->offset += obuf->len;
1981 ibuf->len -= obuf->len;
1982 }
1983 nbuf++;
1984 rem -= obuf->len;
1985 }
1986 pipe_unlock(pipe);
1987
dc00809a 1988 fuse_copy_init(&cs, 0, NULL);
dd3bb14f 1989 cs.pipebufs = bufs;
6c09e94a 1990 cs.nr_segs = nbuf;
dd3bb14f
MS
1991 cs.pipe = pipe;
1992
ce534fb0
MS
1993 if (flags & SPLICE_F_MOVE)
1994 cs.move_pages = 1;
1995
c3696046 1996 ret = fuse_dev_do_write(fud, &cs, len);
dd3bb14f 1997
a779638c
MS
1998 for (idx = 0; idx < nbuf; idx++)
1999 pipe_buf_release(pipe, &bufs[idx]);
2000
dd3bb14f
MS
2001out:
2002 kfree(bufs);
2003 return ret;
2004}
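
The splice path lets the daemon hand whole pipe buffers to the kernel instead of copying the reply through user memory. A hedged userspace sketch, assuming the complete reply (header plus data) has already been assembled in a pipe; SPLICE_F_MOVE asks the kernel to steal the pages where possible:

/*
 * Daemon-side sketch (userspace, illustrative only): move a fully assembled
 * reply from a pipe to /dev/fuse.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int splice_reply(int fuse_fd, int pipe_rd, size_t reply_len)
{
	ssize_t n = splice(pipe_rd, NULL, fuse_fd, NULL, reply_len,
			   SPLICE_F_MOVE);

	return n == (ssize_t)reply_len ? 0 : -1;
}
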
2005
334f485d
MS
2006static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2007{
334f485d 2008 unsigned mask = POLLOUT | POLLWRNORM;
f88996a9 2009 struct fuse_iqueue *fiq;
cc080e9e
MS
2010 struct fuse_dev *fud = fuse_get_dev(file);
2011
2012 if (!fud)
7025d9ad 2013 return POLLERR;
334f485d 2014
cc080e9e 2015 fiq = &fud->fc->iq;
f88996a9 2016 poll_wait(file, &fiq->waitq, wait);
334f485d 2017
4ce60812 2018 spin_lock(&fiq->waitq.lock);
e16714d8 2019 if (!fiq->connected)
7025d9ad 2020 mask = POLLERR;
f88996a9 2021 else if (request_pending(fiq))
7025d9ad 2022 mask |= POLLIN | POLLRDNORM;
4ce60812 2023 spin_unlock(&fiq->waitq.lock);
334f485d
MS
2024
2025 return mask;
2026}
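
Since the device is always writable, poll() on /dev/fuse is only meaningful for POLLIN (a request is pending) and POLLERR (the connection is gone). A simplified single-threaded daemon loop, assuming a buffer of at least FUSE_MIN_READ_BUFFER bytes; the dispatch function is hypothetical:

/*
 * Daemon-side sketch (userspace, illustrative only): single-threaded request
 * loop.  `buf` must be at least FUSE_MIN_READ_BUFFER bytes.
 */
#include <linux/fuse.h>
#include <poll.h>
#include <unistd.h>

static void request_loop(int fuse_fd, char *buf, size_t bufsize)
{
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & POLLERR)
			break;			/* aborted or unmounted */
		if (pfd.revents & POLLIN) {
			ssize_t n = read(fuse_fd, buf, bufsize); /* one whole request */

			if (n <= 0)
				break;
			/* dispatch_request(buf, n);  -- hypothetical handler */
		}
	}
}
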
2027
69a53bf2
MS
2028/*
2029 * Abort all requests on the given list (pending or processing)
2030 *
d7133114 2031 * This function is called without fc->lock held; requests are finished with request_end()
69a53bf2 2032 */
334f485d
MS
2033static void end_requests(struct fuse_conn *fc, struct list_head *head)
2034{
2035 while (!list_empty(head)) {
2036 struct fuse_req *req;
2037 req = list_entry(head->next, struct fuse_req, list);
334f485d 2038 req->out.h.error = -ECONNABORTED;
33e14b4d 2039 clear_bit(FR_SENT, &req->flags);
f377cb79 2040 list_del_init(&req->list);
334f485d 2041 request_end(fc, req);
334f485d
MS
2042 }
2043}
2044
357ccf2b
BG
2045static void end_polls(struct fuse_conn *fc)
2046{
2047 struct rb_node *p;
2048
2049 p = rb_first(&fc->polled_files);
2050
2051 while (p) {
2052 struct fuse_file *ff;
2053 ff = rb_entry(p, struct fuse_file, polled_node);
2054 wake_up_interruptible_all(&ff->poll_wait);
2055
2056 p = rb_next(p);
2057 }
2058}
2059
69a53bf2
MS
2060/*
2061 * Abort all requests.
2062 *
b716d425
MS
2063 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2064 * filesystem.
2065 *
2066 * The same effect is usually achievable through killing the filesystem daemon
2067 * and all users of the filesystem. The exception is the combination of an
2068 * asynchronous request and the tricky deadlock (see
2069 * Documentation/filesystems/fuse.txt).
69a53bf2 2070 *
b716d425
MS
 2071 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 2072 * requests; these should be finished off immediately. Locked requests will be
 2073 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 2074 * requests. It is possible that some request will finish before we can; this
 2075 * is OK, since in that case the request is removed from the list before we
 2076 * touch it.
69a53bf2
MS
2077 */
2078void fuse_abort_conn(struct fuse_conn *fc)
2079{
f88996a9
MS
2080 struct fuse_iqueue *fiq = &fc->iq;
2081
d7133114 2082 spin_lock(&fc->lock);
69a53bf2 2083 if (fc->connected) {
c3696046 2084 struct fuse_dev *fud;
b716d425 2085 struct fuse_req *req, *next;
41f98274
MS
2086 LIST_HEAD(to_end1);
2087 LIST_HEAD(to_end2);
b716d425 2088
69a53bf2 2089 fc->connected = 0;
51eb01e7 2090 fc->blocked = 0;
9759bd51 2091 fuse_set_initialized(fc);
c3696046
MS
2092 list_for_each_entry(fud, &fc->devices, entry) {
2093 struct fuse_pqueue *fpq = &fud->pq;
2094
2095 spin_lock(&fpq->lock);
2096 fpq->connected = 0;
2097 list_for_each_entry_safe(req, next, &fpq->io, list) {
2098 req->out.h.error = -ECONNABORTED;
2099 spin_lock(&req->waitq.lock);
2100 set_bit(FR_ABORTED, &req->flags);
2101 if (!test_bit(FR_LOCKED, &req->flags)) {
2102 set_bit(FR_PRIVATE, &req->flags);
2103 list_move(&req->list, &to_end1);
2104 }
2105 spin_unlock(&req->waitq.lock);
77cd9d48 2106 }
c3696046
MS
2107 list_splice_init(&fpq->processing, &to_end2);
2108 spin_unlock(&fpq->lock);
b716d425 2109 }
41f98274
MS
2110 fc->max_background = UINT_MAX;
2111 flush_bg_queue(fc);
8c91189a 2112
4ce60812 2113 spin_lock(&fiq->waitq.lock);
8c91189a 2114 fiq->connected = 0;
f88996a9 2115 list_splice_init(&fiq->pending, &to_end2);
a8a86d78
TE
2116 list_for_each_entry(req, &to_end2, list)
2117 clear_bit(FR_PENDING, &req->flags);
8c91189a
MS
2118 while (forget_pending(fiq))
2119 kfree(dequeue_forget(fiq, 1, NULL));
4ce60812
MS
2120 wake_up_all_locked(&fiq->waitq);
2121 spin_unlock(&fiq->waitq.lock);
8c91189a 2122 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
ee314a87
MS
2123 end_polls(fc);
2124 wake_up_all(&fc->blocked_waitq);
2125 spin_unlock(&fc->lock);
8c91189a 2126
41f98274
MS
2127 while (!list_empty(&to_end1)) {
2128 req = list_first_entry(&to_end1, struct fuse_req, list);
b716d425 2129 __fuse_get_request(req);
f377cb79 2130 list_del_init(&req->list);
b716d425 2131 request_end(fc, req);
b716d425 2132 }
41f98274 2133 end_requests(fc, &to_end2);
ee314a87
MS
2134 } else {
2135 spin_unlock(&fc->lock);
69a53bf2 2136 }
69a53bf2 2137}
08cbf542 2138EXPORT_SYMBOL_GPL(fuse_abort_conn);
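
In practice this abort is triggered from the fusectl filesystem: writing to the connection's "abort" file (see Documentation/filesystems/fuse.txt) ends up here. A hedged administrative sketch, assuming fusectl is mounted in the usual place and `dev` is the connection number shown under /sys/fs/fuse/connections/ (the filesystem's device number):

/*
 * Administrative sketch (userspace, illustrative only): force-abort a hung
 * FUSE connection.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int fuse_force_abort(unsigned long dev)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%lu/abort", dev);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = (write(fd, "1", 1) == 1) ? 0 : -1;
	close(fd);
	return ret;
}
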
69a53bf2 2139
08cbf542 2140int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2141{
cc080e9e
MS
2142 struct fuse_dev *fud = fuse_get_dev(file);
2143
2144 if (fud) {
2145 struct fuse_conn *fc = fud->fc;
c3696046
MS
2146 struct fuse_pqueue *fpq = &fud->pq;
2147
2148 WARN_ON(!list_empty(&fpq->io));
2149 end_requests(fc, &fpq->processing);
2150 /* Are we the last open device? */
2151 if (atomic_dec_and_test(&fc->dev_count)) {
2152 WARN_ON(fc->iq.fasync != NULL);
2153 fuse_abort_conn(fc);
2154 }
cc080e9e 2155 fuse_dev_free(fud);
385a17bf 2156 }
334f485d
MS
2157 return 0;
2158}
08cbf542 2159EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2160
385a17bf
JD
2161static int fuse_dev_fasync(int fd, struct file *file, int on)
2162{
cc080e9e
MS
2163 struct fuse_dev *fud = fuse_get_dev(file);
2164
2165 if (!fud)
a87046d8 2166 return -EPERM;
385a17bf
JD
2167
2168 /* No locking - fasync_helper does its own locking */
cc080e9e 2169 return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
385a17bf
JD
2170}
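
The fasync hook lets a daemon be told about new requests via SIGIO instead of sleeping in read() or poll(). A sketch of the corresponding userspace setup; turning on O_ASYNC with F_SETFL is what reaches fuse_dev_fasync():

/*
 * Daemon-side sketch (userspace, illustrative only): request SIGIO whenever
 * a request is queued on the connection.
 */
#include <fcntl.h>
#include <unistd.h>

static int enable_sigio(int fuse_fd)
{
	if (fcntl(fuse_fd, F_SETOWN, getpid()) == -1)
		return -1;
	/* Setting O_ASYNC invokes fuse_dev_fasync() via the ->fasync hook. */
	return fcntl(fuse_fd, F_SETFL,
		     fcntl(fuse_fd, F_GETFL) | O_ASYNC) == -1 ? -1 : 0;
}
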
2171
00c570f4
MS
2172static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2173{
cc080e9e
MS
2174 struct fuse_dev *fud;
2175
00c570f4
MS
2176 if (new->private_data)
2177 return -EINVAL;
2178
cc080e9e
MS
2179 fud = fuse_dev_alloc(fc);
2180 if (!fud)
2181 return -ENOMEM;
2182
2183 new->private_data = fud;
c3696046 2184 atomic_inc(&fc->dev_count);
00c570f4
MS
2185
2186 return 0;
2187}
2188
2189static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2190 unsigned long arg)
2191{
2192 int err = -ENOTTY;
2193
2194 if (cmd == FUSE_DEV_IOC_CLONE) {
2195 int oldfd;
2196
2197 err = -EFAULT;
2198 if (!get_user(oldfd, (__u32 __user *) arg)) {
2199 struct file *old = fget(oldfd);
2200
2201 err = -EINVAL;
2202 if (old) {
8ed1f0e2
JH
2203 struct fuse_dev *fud = NULL;
2204
2205 /*
2206 * Check against file->f_op because CUSE
2207 * uses the same ioctl handler.
2208 */
2209 if (old->f_op == file->f_op &&
2210 old->f_cred->user_ns == file->f_cred->user_ns)
2211 fud = fuse_get_dev(old);
00c570f4 2212
cc080e9e 2213 if (fud) {
00c570f4 2214 mutex_lock(&fuse_mutex);
cc080e9e 2215 err = fuse_device_clone(fud->fc, file);
00c570f4
MS
2216 mutex_unlock(&fuse_mutex);
2217 }
2218 fput(old);
2219 }
2220 }
2221 }
2222 return err;
2223}
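
FUSE_DEV_IOC_CLONE is how a multi-threaded daemon gives each worker its own device fd, and therefore its own processing queue, on the same connection. A hedged sketch of the userspace side, assuming `session_fd` is the /dev/fuse descriptor used at mount time; the helper name is illustrative:

/*
 * Daemon-side sketch (userspace, illustrative only): clone an existing
 * /dev/fuse session fd for a worker thread.
 */
#include <fcntl.h>
#include <linux/fuse.h>		/* FUSE_DEV_IOC_CLONE */
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int clone_fuse_fd(int session_fd)
{
	uint32_t oldfd = session_fd;
	int newfd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (newfd < 0)
		return -1;
	if (ioctl(newfd, FUSE_DEV_IOC_CLONE, &oldfd) == -1) {
		close(newfd);
		return -1;
	}
	return newfd;
}
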
2224
4b6f5d20 2225const struct file_operations fuse_dev_operations = {
334f485d 2226 .owner = THIS_MODULE,
94e4fe2c 2227 .open = fuse_dev_open,
334f485d 2228 .llseek = no_llseek,
fbdbacca 2229 .read_iter = fuse_dev_read,
c3021629 2230 .splice_read = fuse_dev_splice_read,
fbdbacca 2231 .write_iter = fuse_dev_write,
dd3bb14f 2232 .splice_write = fuse_dev_splice_write,
334f485d
MS
2233 .poll = fuse_dev_poll,
2234 .release = fuse_dev_release,
385a17bf 2235 .fasync = fuse_dev_fasync,
00c570f4
MS
2236 .unlocked_ioctl = fuse_dev_ioctl,
2237 .compat_ioctl = fuse_dev_ioctl,
334f485d 2238};
08cbf542 2239EXPORT_SYMBOL_GPL(fuse_dev_operations);
334f485d
MS
2240
2241static struct miscdevice fuse_miscdevice = {
2242 .minor = FUSE_MINOR,
2243 .name = "fuse",
2244 .fops = &fuse_dev_operations,
2245};
2246
2247int __init fuse_dev_init(void)
2248{
2249 int err = -ENOMEM;
2250 fuse_req_cachep = kmem_cache_create("fuse_request",
2251 sizeof(struct fuse_req),
20c2df83 2252 0, 0, NULL);
334f485d
MS
2253 if (!fuse_req_cachep)
2254 goto out;
2255
2256 err = misc_register(&fuse_miscdevice);
2257 if (err)
2258 goto out_cache_clean;
2259
2260 return 0;
2261
2262 out_cache_clean:
2263 kmem_cache_destroy(fuse_req_cachep);
2264 out:
2265 return err;
2266}
2267
2268void fuse_dev_cleanup(void)
2269{
2270 misc_deregister(&fuse_miscdevice);
2271 kmem_cache_destroy(fuse_req_cachep);
2272}