/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	struct fuse_conn *fc;
	spin_lock(&fuse_lock);
	fc = file->private_data;
	if (fc && !fc->connected)
		fc = NULL;
	spin_unlock(&fuse_lock);
	return fc;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void fuse_reset_request(struct fuse_req *req)
{
	int preallocated = req->preallocated;
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
	req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	spin_lock(&fuse_lock);
	BUG_ON(list_empty(&fc->unused_list));
	req = list_entry(fc->unused_list.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fuse_lock);
	fuse_request_init(req);
	req->preallocated = 1;
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

/* This can return NULL, but only if it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
	int intr;
	sigset_t oldset;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = down_interruptible(&fc->outstanding_sem);
	restore_sigs(&oldset);
	if (intr) {
		atomic_dec(&fc->num_waiting);
		return NULL;
	}
	return do_get_request(fc);
}

static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fuse_lock);
	if (req->preallocated) {
		atomic_dec(&fc->num_waiting);
		list_add(&req->list, &fc->unused_list);
	} else
		fuse_request_free(req);

	/* If we are in debt, decrease that first */
	if (fc->outstanding_debt)
		fc->outstanding_debt--;
	else
		up(&fc->outstanding_sem);
	spin_unlock(&fuse_lock);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count))
		fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fuse_lock);
	list_del(&req->bg_entry);
	spin_unlock(&fuse_lock);
}

static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	struct fuse_init_out *arg = &req->misc.init_out;

	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
		fc->conn_error = 1;
	else {
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
	}

	/* Once the INIT reply is received, other requests can go
	   out, so do FUSE_MAX_OUTSTANDING - 1 up()s on
	   outstanding_sem.  The last up() is done in
	   fuse_putback_request() */
	for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
		up(&fc->outstanding_sem);
}

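/*
 * Editorial note (not in the original source): the accounting works
 * out as follows, assuming outstanding_sem is set up with exactly one
 * slot at mount time (see the comment in fuse_send_init() below).
 * INIT consumes that slot, the loop above does
 * FUSE_MAX_OUTSTANDING - 1 up()s, and releasing the INIT request does
 * one more up() in fuse_putback_request(), leaving exactly
 * FUSE_MAX_OUTSTANDING slots for normal requests.
 */
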
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived, or it was interrupted (and not yet sent), or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), and finally the reference to the request is
 * released.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	spin_unlock(&fuse_lock);
	if (req->background) {
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(req);
		up_read(&fc->sbput_sem);
	}
	wake_up(&req->waitq);
	if (req->in.h.opcode == FUSE_INIT)
		process_init_reply(fc, req);
	else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
		/* Special case for failed iget in CREATE */
		u64 nodeid = req->in.h.nodeid;
		fuse_reset_request(req);
		fuse_send_forget(fc, req, nodeid, 1);
		return;
	}
	fuse_put_request(fc, req);
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interruptible, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fuse_lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	restore_sigs(&oldset);
	spin_lock(&fuse_lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	if (!req->interrupted) {
		req->out.h.error = -EINTR;
		req->interrupted = 1;
	}
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fuse_lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fuse_lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT)
		background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	if (!req->preallocated) {
		/* If the request is not preallocated (either FORGET
		   or RELEASE), then still decrease outstanding_sem,
		   so the user can't open an infinite number of files
		   while not processing the RELEASE requests.
		   However, for efficiency do it without blocking, so
		   if down() would block, just increase the debt
		   instead */
		if (down_trylock(&fc->outstanding_sem))
			fc->outstanding_debt++;
	}
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	wake_up(&fc->waitq);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fuse_lock);
}

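/*
 * Illustrative sketch (not part of the original file): the typical
 * calling pattern for the request machinery above, as seen from the
 * rest of the FUSE module.  The opcode choice and error handling here
 * are only an example; a real caller also fills in nodeid and the
 * in/out argument descriptors.
 */
#if 0
static int example_simple_request(struct fuse_conn *fc)
{
	int err;
	struct fuse_req *req = fuse_get_request(fc);
	if (!req)
		return -EINTR;			/* NULL only on SIGKILL */
	req->in.h.opcode = FUSE_GETATTR;	/* fill in the request */
	request_send(fc, req);			/* queue and wait for reply */
	err = req->out.h.error;
	fuse_put_request(fc, req);		/* drop the caller's reference */
	return err;
}
#endif
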
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fuse_lock);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fuse_lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	background_request(fc, req);
	spin_unlock(&fuse_lock);
	request_send_nowait(fc, req);
}

void fuse_send_init(struct fuse_conn *fc)
{
	/* This is called from fuse_read_super() so there's guaranteed
	   to be exactly one request available */
	struct fuse_req *req = fuse_get_request(fc);
	struct fuse_init_in *arg = &req->misc.init_in;
	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	req->in.h.opcode = FUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5.  Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	req->out.argvar = 1;
	req->out.args[0].size = sizeof(struct fuse_init_out);
	req->out.args[0].value = &req->misc.init_out;
	request_send_background(fc, req);
}

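/*
 * Illustrative sketch (not part of the original file): what the
 * userspace daemon sends back for the INIT request above.  Field
 * names are from <linux/fuse.h>; the max_write value is only an
 * assumed example of the daemon's choice.
 */
#if 0
	struct fuse_init_out outarg;
	memset(&outarg, 0, sizeof(outarg));
	outarg.major = FUSE_KERNEL_VERSION;
	outarg.minor = FUSE_KERNEL_MINOR_VERSION;
	outarg.max_write = 65536;	/* assumption: daemon's choice */
	/* reply with oh.unique copied from the INIT request header */
#endif
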
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fuse_lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fuse_lock);
	}
	return err;
}

/*
 * Unlock request.  If it was interrupted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
	if (req) {
		spin_lock(&fuse_lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fuse_lock);
	}
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct fuse_req *req, const struct iovec *iov,
			   unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of userspace buffer, map it to kernel address
 * space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

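/*
 * Editorial note (not in the original source): on the wire, a request
 * read from the device is the header followed by the flattened
 * argument list:
 *
 *	struct fuse_in_header | in.args[0] | ... | in.args[numargs-1]
 *
 * where the last argument may be backed by page cache pages
 * (argpages), handled by fuse_copy_pages() above.
 */
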
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fuse_lock);
		schedule();
		spin_lock(&fuse_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), or the request has been
 * interrupted, or there was an error during the copying, then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list, and set the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_conn *fc;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;

 restart:
	spin_lock(&fuse_lock);
	fc = file->private_data;
	err = -EPERM;
	if (!fc)
		goto err_unlock;
	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fuse_lock);
	fuse_copy_init(&cs, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		spin_unlock(&fuse_lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fuse_lock);
	return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}

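/*
 * Illustrative userspace counterpart (not part of this file): reading
 * one request from /dev/fuse.  Each successful read() returns exactly
 * one request, beginning with a struct fuse_in_header.  The buffer
 * size here is only an assumption; it must be large enough for the
 * biggest expected request, or the read fails with EIO (see above).
 */
#if 0
	char buf[128 * 1024];	/* assumed to be large enough */
	ssize_t res = read(fuse_fd, buf, sizeof(buf));
	struct fuse_in_header *in = (struct fuse_in_header *) buf;
	/* dispatch on in->opcode; in->unique identifies the request */
#endif
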
/* Look up a request on the processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, it is removed
 * from the list and the rest of the buffer is copied to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -ENODEV;

	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fuse_lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	if (req->interrupted) {
		spin_unlock(&fuse_lock);
		fuse_copy_finish(&cs);
		spin_lock(&fuse_lock);
		request_end(fc, req);
		return -ENOENT;
	}
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fuse_lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fuse_lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}

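/*
 * Illustrative userspace counterpart (not part of this file): writing
 * a reply.  oh.unique must match the request header and oh.len must
 * equal the total number of bytes written, as checked in
 * fuse_dev_writev() above.  "data"/"datalen" are placeholders.
 */
#if 0
	struct fuse_out_header oh;
	struct iovec iov[2];
	oh.unique = in->unique;	/* from the request being answered */
	oh.error = 0;
	oh.len = sizeof(oh) + datalen;
	iov[0].iov_base = &oh;
	iov[0].iov_len = sizeof(oh);
	iov[1].iov_base = data;
	iov[1].iov_len = datalen;
	writev(fuse_fd, iov, 2);
#endif
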
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	unsigned mask = POLLOUT | POLLWRNORM;

	if (!fc)
		return -ENODEV;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fuse_lock);
	if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fuse_lock);

	return mask;
}

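/*
 * Illustrative userspace counterpart (not part of this file): the
 * device is always writable per the mask above, so a daemon only
 * needs to poll for readability before picking up the next request.
 */
#if 0
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
	poll(&pfd, 1, -1);	/* wait until a request is pending */
#endif
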
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fuse_lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fuse_lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req;
		req = list_entry(fc->io.next, struct fuse_req, list);
		req->interrupted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list, is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fuse_lock);
	if (fc->connected) {
		fc->connected = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
	}
	spin_unlock(&fuse_lock);
}

static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc;

	spin_lock(&fuse_lock);
	fc = file->private_data;
	if (fc) {
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
	}
	spin_unlock(&fuse_lock);
	if (fc)
		kobject_put(&fc->kobj);

	return 0;
}

struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}