/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
}

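/*
 * Allocate a request together with its page pointer and page descriptor
 * arrays.  Small requests use the arrays embedded in struct fuse_req;
 * larger ones get separately kmalloc'ed arrays.
 */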
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

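/*
 * Temporarily block every signal except SIGKILL, so that waits for a
 * userspace reply can only be interrupted by a fatal signal.
 */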
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

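/* Fill in the caller's credentials and pid in the request header */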
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

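/* Allocate the next request ID; zero is reserved, so skip it on wrap-around */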
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->forget_list_tail->next = forget;
		fc->forget_list_tail = forget;
		wake_up(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fc->lock);
}

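/*
 * Move queued background requests to the pending list until the
 * max_background limit on concurrently active requests is reached.
 */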
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

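/* Put the request on the interrupts list and poke the device reader */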
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;

	req->isreply = 0;
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		err = 0;
	}
	spin_unlock(&fc->lock);

	return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->isreply = 0;
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap(buf->page);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap(cs->pg);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap(page);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap(cs->pg);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

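/*
 * Check that a page is safe to steal and insert into the page cache:
 * it must be unmapped, hold no extra references and carry no
 * unexpected page flags.
 */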
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

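/*
 * Try to move a page from the pipe buffer directly into the page cache
 * (splice with SPLICE_F_MOVE) instead of copying it.  Returns 1 to fall
 * back to an ordinary copy, 0 on success, or a negative error.
 */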
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

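/*
 * Hand a request page to the pipe by reference (zero-copy read path):
 * take a page cache reference and stash it in the next pipe buffer.
 */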
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

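/* Is there at least one queued FORGET waiting to be read by userspace? */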
static int forget_pending(struct fuse_conn *fc)
{
	return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
		forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

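/*
 * Detach up to 'max' queued forgets from the connection's forget list
 * and return them as a singly linked chain; *countp receives how many
 * were dequeued.
 */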
static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fc->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fc->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fc->forget_list_head.next == NULL)
		fc->forget_list_tail = &fc->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fc, max_forgets, &count);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->lock)
{
	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fc)) {
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 && (num != 0 || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, 0);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

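/*
 * Copy the reply arguments from userspace into the request, after
 * validating that the reply length matches what the request expects
 * (a short last argument is allowed only if out->argvar is set).
 */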
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

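/*
 * Fail every request still queued (background, pending and processing)
 * and drop all queued forgets.
 */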
static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	fc->max_background = UINT_MAX;
	flush_bg_queue(fc);
	end_requests(fc, &fc->pending);
	end_requests(fc, &fc->processing);
	while (forget_pending(fc))
		kfree(dequeue_forget(fc, 1, NULL));
}

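/* Wake every waiter sleeping on a polled FUSE file's poll waitqueue */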
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		fc->blocked = 0;
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = fuse_dev_read,
	.splice_read = fuse_dev_splice_read,
	.write = do_sync_write,
	.aio_write = fuse_dev_write,
	.splice_write = fuse_dev_splice_write,
	.poll = fuse_dev_poll,
	.release = fuse_dev_release,
	.fasync = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}