/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

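/* Look up the connection for a device file.  Returns NULL if the
   filesystem is not (or no longer) mounted */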
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	struct fuse_conn *fc;
	spin_lock(&fuse_lock);
	fc = file->private_data;
	if (fc && !fc->mounted)
		fc = NULL;
	spin_unlock(&fuse_lock);
	return fc;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

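/* Block all signals except SIGKILL, so that waiting for a reply can
   only be interrupted by a fatal signal */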
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void fuse_reset_request(struct fuse_req *req)
{
	int preallocated = req->preallocated;
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
	req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

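/* Take a request off the preallocated unused list and fill in the
   credentials of the current task.  The caller must guarantee that a
   preallocated request is available */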
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	spin_lock(&fuse_lock);
	BUG_ON(list_empty(&fc->unused_list));
	req = list_entry(fc->unused_list.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fuse_lock);
	fuse_request_init(req);
	req->preallocated = 1;
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
	int intr;
	sigset_t oldset;

	block_sigs(&oldset);
	intr = down_interruptible(&fc->outstanding_sem);
	restore_sigs(&oldset);
	return intr ? NULL : do_get_request(fc);
}

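/* Return a finished request to the unused list (or free it if it was
   not preallocated) and release one slot of outstanding_sem, paying
   off accumulated debt first */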
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fuse_lock);
	if (req->preallocated)
		list_add(&req->list, &fc->unused_list);
	else
		fuse_request_free(req);

	/* If we are in debt decrease that first */
	if (fc->outstanding_debt)
		fc->outstanding_debt--;
	else
		up(&fc->outstanding_sem);
	spin_unlock(&fuse_lock);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count))
		fuse_putback_request(fc, req);
}

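/* Drop the inode and file references taken by background_request()
   and remove the request from the background list */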
void fuse_release_background(struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fuse_lock);
	list_del(&req->bg_entry);
	spin_unlock(&fuse_lock);
}

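/* Handle the INIT reply: record the negotiated minor version and
   max_write, or mark the connection as failed, then let the other
   requests go out */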
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	struct fuse_init_out *arg = &req->misc.init_out;

	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
		fc->conn_error = 1;
	else {
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
	}

	/* After the INIT reply is received, other requests can go out.
	   So do (FUSE_MAX_OUTSTANDING - 1) up()s on outstanding_sem.
	   The last up() is done in fuse_putback_request() */
	for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
		up(&fc->outstanding_sem);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived, or it was interrupted (and not yet sent), or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), and finally the reference to the request is
 * released.
 *
 * Called with fuse_lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	req->finished = 1;
	spin_unlock(&fuse_lock);
	if (req->background) {
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(req);
		up_read(&fc->sbput_sem);
	}
	wake_up(&req->waitq);
	if (req->in.h.opcode == FUSE_INIT)
		process_init_reply(fc, req);
	else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
		/* Special case for failed iget in CREATE */
		u64 nodeid = req->in.h.nodeid;
		fuse_reset_request(req);
		fuse_send_forget(fc, req, nodeid, 1);
		return;
	}
	fuse_put_request(fc, req);
}

/*
 * Unfortunately request interruption doesn't just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except to introduce additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply had
 * been received.
 *
 * There's one more use for a background request: the RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fuse_lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->finished);
	restore_sigs(&oldset);
	spin_lock(&fuse_lock);
	if (req->finished)
		return;

	req->out.h.error = -EINTR;
	req->interrupted = 1;
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fuse_lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fuse_lock);
	}
	if (!req->sent && !list_empty(&req->list)) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (!req->finished && req->sent)
		background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

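/* Assign a unique ID and the total length to the request, account it
   against outstanding_sem, add it to the pending list and wake up a
   reader.  Called with fuse_lock held */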
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	if (!req->preallocated) {
		/* If the request is not preallocated (either FORGET or
		   RELEASE), then still decrease outstanding_sem, so the
		   user can't open an unbounded number of files while
		   not processing the RELEASE requests.  However, for
		   efficiency do it without blocking, so if down()
		   would block, just increase the debt instead */
		if (down_trylock(&fc->outstanding_sem))
			fc->outstanding_debt++;
	}
	list_add_tail(&req->list, &fc->pending);
	wake_up(&fc->waitq);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fuse_lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fuse_lock);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fuse_lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	background_request(fc, req);
	spin_unlock(&fuse_lock);
	request_send_nowait(fc, req);
}

void fuse_send_init(struct fuse_conn *fc)
{
	/* This is called from fuse_read_super() so there's guaranteed
	   to be a request available */
	struct fuse_req *req = do_get_request(fc);
	struct fuse_init_in *arg = &req->misc.init_in;
	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	req->in.h.opcode = FUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	req->out.argvar = 1;
	req->out.args[0].size = sizeof(struct fuse_init_out);
	req->out.args[0].value = &req->misc.init_out;
	request_send_background(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fuse_lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fuse_lock);
	}
	return err;
}

/*
 * Unlock request.  If it was interrupted while being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
	if (req) {
		spin_lock(&fuse_lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fuse_lock);
	}
}

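/* State of an in-progress copy between the request arguments and the
   userspace buffer passed to the device read or write */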
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct fuse_req *req, const struct iovec *iov,
			   unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of the userspace buffer, map it into kernel
 * address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->mounted && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fuse_lock);
		schedule();
		spin_lock(&fuse_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), the request has been
 * interrupted, or there was an error during the copying, then it is
 * finished by calling request_end().  Otherwise it is added to the
 * processing list and the 'sent' flag is set.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_conn *fc;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;

 restart:
	spin_lock(&fuse_lock);
	fc = file->private_data;
	err = -EPERM;
	if (!fc)
		goto err_unlock;
	request_wait(fc);
	err = -ENODEV;
	if (!fc->mounted)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	list_del_init(&req->list);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since its data may be too large */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fuse_lock);
	fuse_copy_init(&cs, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->sent = 1;
		list_add_tail(&req->list, &fc->processing);
		spin_unlock(&fuse_lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fuse_lock);
	return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up a request on the processing list by its unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

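/* Copy the reply arguments from the userspace buffer into the
   request.  An error reply carries no arguments; otherwise the size
   must match the expected argument sizes exactly, except that the
   last argument may be shorter if out->argvar is set */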
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, it is removed
 * from the list and the rest of the buffer is copied to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -ENODEV;

	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fuse_lock);
	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	list_del_init(&req->list);
	if (req->interrupted) {
		spin_unlock(&fuse_lock);
		fuse_copy_finish(&cs);
		spin_lock(&fuse_lock);
		request_end(fc, req);
		return -ENOENT;
	}
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fuse_lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fuse_lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}

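/* The device is always writable; it is readable when there is a
   request on the pending list */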
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	unsigned mask = POLLOUT | POLLWRNORM;

	if (!fc)
		return -ENODEV;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fuse_lock);
	if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fuse_lock);

	return mask;
}

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		list_del_init(&req->list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fuse_lock);
	}
}

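/* Called when the device file is closed: mark the connection as
   disconnected and abort all pending and processing requests */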
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc;

	spin_lock(&fuse_lock);
	fc = file->private_data;
	if (fc) {
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		fuse_release_conn(fc);
	}
	spin_unlock(&fuse_lock);
	return 0;
}

struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

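/* Create the slab cache for requests and register the FUSE misc
   device */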
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}