/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

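/* Reset a request to a pristine state, holding a single reference */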
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

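/*
 * Block all signals except SIGKILL, so that while the mask is in
 * effect only a fatal signal can interrupt a wait.
 */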
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

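/* Record the caller's credentials and pid in the request header */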
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

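/*
 * Allocate the next unique request ID.  Called under fc->lock.  Zero
 * is skipped: a zero 'unique' in a reply marks an unsolicited
 * notification instead.
 */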
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

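/*
 * Add a request to the pending list and wake up waiting readers of
 * the device, both blocked reads and SIGIO listeners.  Called with
 * fc->lock held.
 */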
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

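/*
 * Move queued background requests onto the pending list, allowing at
 * most max_background of them to be active at once.
 */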
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

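/*
 * Wait for a request to complete, in up to three stages: a fully
 * interruptible wait (skipped if the server doesn't support
 * interrupts), a wait interruptible only by fatal signals (skipped
 * for forced requests), and finally an uninterruptible wait for
 * requests that are forced or already copied to userspace.
 */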
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

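/*
 * State of an in-progress copy between a request and userspace,
 * backed either by an iovec or, in the splice case, by a set of pipe
 * buffers.
 */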
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		buf->ops->unmap(cs->pipe, buf, cs->mapaddr);

		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		err = buf->ops->confirm(cs->pipe, buf);
		if (err)
			return err;

		BUG_ON(!cs->nr_segs);
		cs->currbuf = buf;
		cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
		cs->len = buf->len;
		cs->buf = cs->mapaddr + buf->offset;
		cs->pipebufs++;
		cs->nr_segs--;
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

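/*
 * A page may only be stolen into the page cache if nothing else
 * holds a reference to it or keeps special state on it.
 */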
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

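/*
 * Instead of copying a page, try to steal the page backing the pipe
 * buffer and install it in the page cache in place of *pagep.
 * Returns 0 on success, a negative error, or 1 if stealing is not
 * possible and the caller should fall back to copying.
 */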
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	remove_from_page_cache(oldpage);
	page_cache_release(oldpage);

	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
	if (err) {
		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
		goto out_fallback_unlock;
	}
	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
__releases(&fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

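/*
 * Dispatch an unsolicited notification from the filesystem daemon.
 * Each handler is responsible for finishing the copy state, on
 * success as well as on failure.
 */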
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

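/*
 * Check that the size of a reply matches its header and copy the
 * argument payload into the request.  A reply shorter than the
 * maximum is allowed only for requests marked 'argvar', in which
 * case the last argument is truncated to fit.
 */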
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

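/*
 * Write replies into the device from a pipe.  References to pipe
 * buffers covering 'len' bytes are collected under the pipe lock and
 * then fed to fuse_dev_do_write(); with SPLICE_F_MOVE the pages may
 * be stolen rather than copied.
 */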
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	memset(&cs, 0, sizeof(struct fuse_copy_state));
	cs.fc = fc;
	cs.write = 0;
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

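/*
 * The device is always writable; it becomes readable when a request
 * is waiting to be served.
 */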
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

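/*
 * Called on the final close of the device file: sever the connection
 * and finish all remaining requests with -ECONNABORTED.
 */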
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}