/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

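/*
 * Allocate a request.  For requests spanning no more than
 * FUSE_REQ_INLINE_PAGES the page pointer and page descriptor arrays
 * are taken from within the request itself, saving two small
 * allocations on the common path.
 */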
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

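/*
 * Allocate a request and wait until the connection is ready for it.
 * Until the INIT reply arrives every allocation blocks; background
 * allocations additionally block while fc->blocked is set, which is
 * how the number of outstanding background requests is throttled.
 */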
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

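/*
 * Add a request to the input queue and kick the reader.  Both this
 * and fuse_get_unique() rely on the caller holding fiq->waitq.lock.
 */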
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

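/*
 * Move requests from the background queue to the input queue while
 * fewer than max_background of them are active.  Called with
 * fc->lock held.
 */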
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_iqueue *fiq = &fc->iq;
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	smp_wmb();
	set_bit(FR_FINISHED, &req->flags);
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

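/*
 * Wait for a reply in up to three phases: first interruptible by any
 * signal (unless the server has disabled FUSE_INTERRUPT), then
 * killable only, and finally, once the request has been seen by
 * userspace or was forced, uninterruptibly.
 */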
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		restore_sigs(&oldset);

		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

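/*
 * Illustrative use (a sketch only; the opcode setup and 'inarg' are
 * hypothetical, FUSE_ARGS() is the declaration helper used elsewhere
 * in fs/fuse):
 *
 *	FUSE_ARGS(args);
 *
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = nodeid;
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	err = fuse_simple_request(fc, &args);
 *
 * The helper below copies the args into a real fuse_req, sends it
 * synchronously and returns the reply's error or variable size.
 */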
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

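/*
 * State for copying between a request and userspace.  Exactly one of
 * cs->iter (ordinary read/write of the device) and cs->pipebufs
 * (splice) supplies the user-side buffer.
 */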
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

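/*
 * Sanity checks before stealing a pipe page: only an otherwise unused,
 * unmapped page with no unexpected flags may be installed in the page
 * cache in place of the original.
 */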
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

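/*
 * Zero-copy path used when page moving is enabled (cs->move_pages):
 * try to replace the destination page with the pipe buffer's page
 * instead of copying it.  Returns 0 on success, 1 to make the caller
 * fall back to an ordinary copy, or a negative error.
 */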
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

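/*
 * Unlink up to 'max' queued forgets and return them as a singly
 * linked list.  Called with fiq->waitq.lock held.
 */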
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fc->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	spin_lock(&fc->lock);
	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);
	spin_unlock(&fc->lock);

	return reqsize;

out_end:
	list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		buf->ops = &nosteal_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

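/*
 * Unsolicited notification handlers, dispatched from fuse_notify().
 * These run in the context of the server's write to the device; each
 * handler must call fuse_copy_finish() on every path out.
 */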
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_CACHE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

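/*
 * FUSE_NOTIFY_RETRIEVE: the server asked for a range of cached data
 * back.  Collect the pages still present in the page cache and send
 * them with a FUSE_NOTIFY_REPLY request; the reply is shortened at
 * the first missing page.
 */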
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

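/*
 * Validate the reply size against the expected arguments: a reply may
 * be shorter than the maximum only for variable length (argvar)
 * requests, in which case the last argument is shrunk accordingly; it
 * may never be longer.
 */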
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

1861/*
1862 * Write a single reply to a request. First the header is copied from
1863 * the write buffer. The request is then searched on the processing
1864 * list by the unique ID found in the header. If found, then remove
1865 * it from the list and copy the rest of the buffer to the request.
1866 * The request is finished by calling request_end()
1867 */
dd3bb14f
MS
1868static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1869 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1870{
1871 int err;
3a2b5b9c 1872 struct fuse_pqueue *fpq = &fc->pq;
334f485d
MS
1873 struct fuse_req *req;
1874 struct fuse_out_header oh;
334f485d 1875
334f485d
MS
1876 if (nbytes < sizeof(struct fuse_out_header))
1877 return -EINVAL;
1878
dd3bb14f 1879 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d
MS
1880 if (err)
1881 goto err_finish;
8599396b
TH
1882
1883 err = -EINVAL;
1884 if (oh.len != nbytes)
1885 goto err_finish;
1886
1887 /*
1888 * Zero oh.unique indicates unsolicited notification message
1889 * and error contains notification code.
1890 */
1891 if (!oh.unique) {
dd3bb14f 1892 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
8599396b
TH
1893 return err ? err : nbytes;
1894 }
1895
334f485d 1896 err = -EINVAL;
8599396b 1897 if (oh.error <= -1000 || oh.error > 0)
334f485d
MS
1898 goto err_finish;
1899
d7133114 1900 spin_lock(&fc->lock);
45a91cb1 1901 spin_lock(&fpq->lock);
69a53bf2 1902 err = -ENOENT;
e96edd94 1903 if (!fpq->connected)
45a91cb1 1904 goto err_unlock_pq;
69a53bf2 1905
3a2b5b9c 1906 req = request_find(fpq, oh.unique);
334f485d 1907 if (!req)
45a91cb1 1908 goto err_unlock_pq;
334f485d 1909
a4d27e75
MS
1910 /* Is it an interrupt reply? */
1911 if (req->intr_unique == oh.unique) {
45a91cb1
MS
1912 spin_unlock(&fpq->lock);
1913
a4d27e75
MS
1914 err = -EINVAL;
1915 if (nbytes != sizeof(struct fuse_out_header))
1916 goto err_unlock;
1917
1918 if (oh.error == -ENOSYS)
1919 fc->no_interrupt = 1;
1920 else if (oh.error == -EAGAIN)
f88996a9 1921 queue_interrupt(&fc->iq, req);
a4d27e75
MS
1922
1923 spin_unlock(&fc->lock);
dd3bb14f 1924 fuse_copy_finish(cs);
a4d27e75
MS
1925 return nbytes;
1926 }
1927
33e14b4d 1928 clear_bit(FR_SENT, &req->flags);
3a2b5b9c 1929 list_move(&req->list, &fpq->io);
334f485d 1930 req->out.h = oh;
825d6d33 1931 set_bit(FR_LOCKED, &req->flags);
45a91cb1 1932 spin_unlock(&fpq->lock);
dd3bb14f 1933 cs->req = req;
ce534fb0
MS
1934 if (!req->out.page_replace)
1935 cs->move_pages = 0;
d7133114 1936 spin_unlock(&fc->lock);
334f485d 1937
dd3bb14f
MS
1938 err = copy_out_args(cs, &req->out, nbytes);
1939 fuse_copy_finish(cs);
334f485d 1940
d7133114 1941 spin_lock(&fc->lock);
45a91cb1 1942 spin_lock(&fpq->lock);
825d6d33 1943 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1944 if (!fpq->connected)
0d8e84b0
MS
1945 err = -ENOENT;
1946 else if (err)
334f485d 1947 req->out.h.error = -EIO;
f377cb79 1948 list_del_init(&req->list);
45a91cb1 1949 spin_unlock(&fpq->lock);
334f485d
MS
1950 request_end(fc, req);
1951
1952 return err ? err : nbytes;
1953
45a91cb1
MS
1954 err_unlock_pq:
1955 spin_unlock(&fpq->lock);
334f485d 1956 err_unlock:
d7133114 1957 spin_unlock(&fc->lock);
334f485d 1958 err_finish:
dd3bb14f 1959 fuse_copy_finish(cs);
334f485d
MS
1960 return err;
1961}
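
/*
 * Userspace sketch (illustrative, not part of this file): the interrupt
 * branch above accepts a header-only reply addressed to the interrupt's
 * unique ID.  Replying -EAGAIN asks the kernel to queue the interrupt
 * again (useful when the INTERRUPT raced ahead of the original request);
 * -ENOSYS would disable interrupts for the connection.
 */
#include <errno.h>
#include <linux/fuse.h>
#include <stdint.h>
#include <unistd.h>

static int requeue_interrupt(int fuse_fd, uint64_t intr_unique)
{
	struct fuse_out_header oh;

	oh.unique = intr_unique;	/* the INTERRUPT request's unique ID */
	oh.error = -EAGAIN;		/* passes the oh.error checks above */
	oh.len = sizeof(oh);		/* interrupt replies carry no body */

	return write(fuse_fd, &oh, sizeof(oh)) < 0 ? -1 : 0;
}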
1962
fbdbacca 1963static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
dd3bb14f
MS
1964{
1965 struct fuse_copy_state cs;
1966 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1967 if (!fc)
1968 return -EPERM;
1969
fbdbacca
AV
1970 if (!iter_is_iovec(from))
1971 return -EINVAL;
1972
dc00809a 1973 fuse_copy_init(&cs, 0, from);
dd3bb14f 1974
fbdbacca 1975 return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
dd3bb14f
MS
1976}
1977
1978static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1979 struct file *out, loff_t *ppos,
1980 size_t len, unsigned int flags)
1981{
1982 unsigned nbuf;
1983 unsigned idx;
1984 struct pipe_buffer *bufs;
1985 struct fuse_copy_state cs;
1986 struct fuse_conn *fc;
1987 size_t rem;
1988 ssize_t ret;
1989
1990 fc = fuse_get_conn(out);
1991 if (!fc)
1992 return -EPERM;
1993
07e77dca 1994 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
1995 if (!bufs)
1996 return -ENOMEM;
1997
1998 pipe_lock(pipe);
1999 nbuf = 0;
2000 rem = 0;
2001 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
2002 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
2003
2004 ret = -EINVAL;
2005 if (rem < len) {
2006 pipe_unlock(pipe);
2007 goto out;
2008 }
2009
2010 rem = len;
2011 while (rem) {
2012 struct pipe_buffer *ibuf;
2013 struct pipe_buffer *obuf;
2014
2015 BUG_ON(nbuf >= pipe->buffers);
2016 BUG_ON(!pipe->nrbufs);
2017 ibuf = &pipe->bufs[pipe->curbuf];
2018 obuf = &bufs[nbuf];
2019
2020 if (rem >= ibuf->len) {
2021 *obuf = *ibuf;
2022 ibuf->ops = NULL;
2023 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2024 pipe->nrbufs--;
2025 } else {
2026 ibuf->ops->get(pipe, ibuf);
2027 *obuf = *ibuf;
2028 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2029 obuf->len = rem;
2030 ibuf->offset += obuf->len;
2031 ibuf->len -= obuf->len;
2032 }
2033 nbuf++;
2034 rem -= obuf->len;
2035 }
2036 pipe_unlock(pipe);
2037
dc00809a 2038 fuse_copy_init(&cs, 0, NULL);
dd3bb14f 2039 cs.pipebufs = bufs;
6c09e94a 2040 cs.nr_segs = nbuf;
dd3bb14f
MS
2041 cs.pipe = pipe;
2042
ce534fb0
MS
2043 if (flags & SPLICE_F_MOVE)
2044 cs.move_pages = 1;
2045
dd3bb14f
MS
2046 ret = fuse_dev_do_write(fc, &cs, len);
2047
2048 for (idx = 0; idx < nbuf; idx++) {
2049 struct pipe_buffer *buf = &bufs[idx];
2050 buf->ops->release(pipe, buf);
2051 }
2052out:
2053 kfree(bufs);
2054 return ret;
2055}
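
/*
 * Userspace sketch (illustrative, not part of this file): a daemon can
 * reply through a pipe so that fuse_dev_splice_write() steals the pages
 * (SPLICE_F_MOVE sets cs.move_pages above) instead of copying them.
 * Pipe setup and the header-plus-data iovec are assumed to be prepared
 * by the caller.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>

static ssize_t splice_reply(int fuse_fd, int pipe_wr, int pipe_rd,
			    const struct iovec *iov, int cnt, size_t len)
{
	/* Stage the reply (fuse_out_header + data) in the pipe... */
	if (vmsplice(pipe_wr, iov, cnt, 0) != (ssize_t)len)
		return -1;
	/* ...then move it to /dev/fuse without another copy. */
	return splice(pipe_rd, NULL, fuse_fd, NULL, len, SPLICE_F_MOVE);
}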
2056
334f485d
MS
2057static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2058{
334f485d 2059 unsigned mask = POLLOUT | POLLWRNORM;
f88996a9 2060 struct fuse_iqueue *fiq;
7025d9ad 2061 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2062 if (!fc)
7025d9ad 2063 return POLLERR;
334f485d 2064
f88996a9
MS
2065 fiq = &fc->iq;
2066 poll_wait(file, &fiq->waitq, wait);
334f485d 2067
4ce60812 2068 spin_lock(&fiq->waitq.lock);
e16714d8 2069 if (!fiq->connected)
7025d9ad 2070 mask = POLLERR;
f88996a9 2071 else if (request_pending(fiq))
7025d9ad 2072 mask |= POLLIN | POLLRDNORM;
4ce60812 2073 spin_unlock(&fiq->waitq.lock);
334f485d
MS
2074
2075 return mask;
2076}
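
/*
 * Userspace sketch (illustrative, not part of this file): the poll hook
 * above maps directly onto poll(2) in a daemon's event loop.  POLLIN
 * corresponds to request_pending(fiq); POLLERR is returned once the
 * connection is aborted or was never set up.
 */
#include <poll.h>

static int wait_for_request(int fuse_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;		/* timeout (0) or poll error (-1) */
	if (pfd.revents & POLLERR)
		return -1;		/* connection is gone */
	return 1;			/* a request is ready to read */
}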
2077
69a53bf2
MS
2078/*
2079 * Abort all requests on the given list (pending or processing)
2080 *
d7133114 2081 * This function releases and reacquires fc->lock
69a53bf2 2082 */
334f485d 2083static void end_requests(struct fuse_conn *fc, struct list_head *head)
b9ca67b2
MS
2084__releases(fc->lock)
2085__acquires(fc->lock)
334f485d
MS
2086{
2087 while (!list_empty(head)) {
2088 struct fuse_req *req;
2089 req = list_entry(head->next, struct fuse_req, list);
334f485d 2090 req->out.h.error = -ECONNABORTED;
33e14b4d
MS
2091 clear_bit(FR_PENDING, &req->flags);
2092 clear_bit(FR_SENT, &req->flags);
f377cb79 2093 list_del_init(&req->list);
334f485d 2094 request_end(fc, req);
d7133114 2095 spin_lock(&fc->lock);
334f485d
MS
2096 }
2097}
2098
357ccf2b
BG
2099static void end_polls(struct fuse_conn *fc)
2100{
2101 struct rb_node *p;
2102
2103 p = rb_first(&fc->polled_files);
2104
2105 while (p) {
2106 struct fuse_file *ff;
2107 ff = rb_entry(p, struct fuse_file, polled_node);
2108 wake_up_interruptible_all(&ff->poll_wait);
2109
2110 p = rb_next(p);
2111 }
2112}
2113
69a53bf2
MS
2114/*
2115 * Abort all requests.
2116 *
b716d425
MS
2117 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2118 * filesystem.
2119 *
2120 * The same effect is usually achievable through killing the filesystem daemon
2121 * and all users of the filesystem. The exception is the combination of an
2122 * asynchronous request and the tricky deadlock (see
2123 * Documentation/filesystems/fuse.txt).
69a53bf2 2124 *
b716d425
MS
2125 * Aborting requests under I/O goes as follows: 1: Separate out the unlocked
2126 * requests; they can be finished off immediately. Locked requests will be
2127 * finished after they are unlocked; see unlock_request(). 2: Finish off the
2128 * unlocked requests. It is possible that some request will finish before we
2129 * can; this is OK, since in that case the request is removed from the list
2130 * before we touch it.
69a53bf2
MS
2131 */
2132void fuse_abort_conn(struct fuse_conn *fc)
2133{
f88996a9 2134 struct fuse_iqueue *fiq = &fc->iq;
3a2b5b9c 2135 struct fuse_pqueue *fpq = &fc->pq;
f88996a9 2136
d7133114 2137 spin_lock(&fc->lock);
69a53bf2 2138 if (fc->connected) {
b716d425 2139 struct fuse_req *req, *next;
41f98274
MS
2140 LIST_HEAD(to_end1);
2141 LIST_HEAD(to_end2);
b716d425 2142
69a53bf2 2143 fc->connected = 0;
51eb01e7 2144 fc->blocked = 0;
9759bd51 2145 fuse_set_initialized(fc);
45a91cb1 2146 spin_lock(&fpq->lock);
e96edd94 2147 fpq->connected = 0;
3a2b5b9c 2148 list_for_each_entry_safe(req, next, &fpq->io, list) {
b716d425
MS
2149 req->out.h.error = -ECONNABORTED;
2150 spin_lock(&req->waitq.lock);
2151 set_bit(FR_ABORTED, &req->flags);
2152 if (!test_bit(FR_LOCKED, &req->flags))
41f98274 2153 list_move(&req->list, &to_end1);
b716d425
MS
2154 spin_unlock(&req->waitq.lock);
2155 }
24b4d33d 2156 list_splice_init(&fpq->processing, &to_end2);
45a91cb1 2157 spin_unlock(&fpq->lock);
41f98274
MS
2158 fc->max_background = UINT_MAX;
2159 flush_bg_queue(fc);
8c91189a 2160
4ce60812 2161 spin_lock(&fiq->waitq.lock);
8c91189a 2162 fiq->connected = 0;
f88996a9 2163 list_splice_init(&fiq->pending, &to_end2);
8c91189a
MS
2164 while (forget_pending(fiq))
2165 kfree(dequeue_forget(fiq, 1, NULL));
4ce60812
MS
2166 wake_up_all_locked(&fiq->waitq);
2167 spin_unlock(&fiq->waitq.lock);
8c91189a
MS
2168 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2169
41f98274
MS
2170 while (!list_empty(&to_end1)) {
2171 req = list_first_entry(&to_end1, struct fuse_req, list);
b716d425 2172 __fuse_get_request(req);
f377cb79 2173 list_del_init(&req->list);
b716d425
MS
2174 request_end(fc, req);
2175 spin_lock(&fc->lock);
2176 }
41f98274 2177 end_requests(fc, &to_end2);
357ccf2b 2178 end_polls(fc);
51eb01e7 2179 wake_up_all(&fc->blocked_waitq);
69a53bf2 2180 }
d7133114 2181 spin_unlock(&fc->lock);
69a53bf2 2182}
08cbf542 2183EXPORT_SYMBOL_GPL(fuse_abort_conn);
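
/*
 * Userspace sketch (illustrative, not part of this file): besides closing
 * the device, fuse_abort_conn() is reachable through the fusectl
 * filesystem, conventionally mounted at /sys/fs/fuse/connections.
 * Writing to a connection's "abort" file triggers the abort; the
 * directory name is assumed here to be the connection's device number
 * (verify against the local fusectl layout).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int abort_fuse_conn(unsigned long conn)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%lu/abort", conn);
	fd = open(path, O_WRONLY);
	if (fd == -1)
		return -1;
	(void)write(fd, "1", 1);	/* any write aborts the connection */
	return close(fd);
}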
69a53bf2 2184
08cbf542 2185int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2186{
0720b315 2187 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2188 if (fc) {
3a2b5b9c 2189 WARN_ON(!list_empty(&fc->pq.io));
f88996a9 2190 WARN_ON(fc->iq.fasync != NULL);
ccd0a0bd 2191 fuse_abort_conn(fc);
bafa9654 2192 fuse_conn_put(fc);
385a17bf 2193 }
f543f253 2194
334f485d
MS
2195 return 0;
2196}
08cbf542 2197EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2198
385a17bf
JD
2199static int fuse_dev_fasync(int fd, struct file *file, int on)
2200{
2201 struct fuse_conn *fc = fuse_get_conn(file);
2202 if (!fc)
a87046d8 2203 return -EPERM;
385a17bf
JD
2204
2205 /* No locking - fasync_helper does its own locking */
f88996a9 2206 return fasync_helper(fd, file, on, &fc->iq.fasync);
385a17bf
JD
2207}
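
/*
 * Userspace sketch (illustrative, not part of this file): enabling
 * O_ASYNC on the device fd reaches fuse_dev_fasync() above, after which
 * kill_fasync() in the request-queueing path delivers SIGIO instead of
 * requiring the daemon to block in read(2).
 */
#include <fcntl.h>
#include <unistd.h>

static int enable_sigio(int fuse_fd)
{
	int flags;

	if (fcntl(fuse_fd, F_SETOWN, getpid()) == -1)
		return -1;
	flags = fcntl(fuse_fd, F_GETFL);
	if (flags == -1)
		return -1;
	return fcntl(fuse_fd, F_SETFL, flags | O_ASYNC);	/* -> ->fasync */
}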
2208
4b6f5d20 2209const struct file_operations fuse_dev_operations = {
334f485d 2210 .owner = THIS_MODULE,
94e4fe2c 2211 .open = fuse_dev_open,
334f485d 2212 .llseek = no_llseek,
fbdbacca 2213 .read_iter = fuse_dev_read,
c3021629 2214 .splice_read = fuse_dev_splice_read,
fbdbacca 2215 .write_iter = fuse_dev_write,
dd3bb14f 2216 .splice_write = fuse_dev_splice_write,
334f485d
MS
2217 .poll = fuse_dev_poll,
2218 .release = fuse_dev_release,
385a17bf 2219 .fasync = fuse_dev_fasync,
334f485d 2220};
08cbf542 2221EXPORT_SYMBOL_GPL(fuse_dev_operations);
334f485d
MS
2222
2223static struct miscdevice fuse_miscdevice = {
2224 .minor = FUSE_MINOR,
2225 .name = "fuse",
2226 .fops = &fuse_dev_operations,
2227};
2228
2229int __init fuse_dev_init(void)
2230{
2231 int err = -ENOMEM;
2232 fuse_req_cachep = kmem_cache_create("fuse_request",
2233 sizeof(struct fuse_req),
20c2df83 2234 0, 0, NULL);
334f485d
MS
2235 if (!fuse_req_cachep)
2236 goto out;
2237
2238 err = misc_register(&fuse_miscdevice);
2239 if (err)
2240 goto out_cache_clean;
2241
2242 return 0;
2243
2244 out_cache_clean:
2245 kmem_cache_destroy(fuse_req_cachep);
2246 out:
2247 return err;
2248}
2249
2250void fuse_dev_cleanup(void)
2251{
2252 misc_deregister(&fuse_miscdevice);
2253 kmem_cache_destroy(fuse_req_cachep);
2254}