fs/fuse/dir.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>

#if BITS_PER_LONG >= 64
static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
{
	entry->d_time = time;
}

static inline u64 fuse_dentry_time(struct dentry *entry)
{
	return entry->d_time;
}
#else
/*
 * On 32 bit archs store the high 32 bits of time in d_fsdata
 */
static void fuse_dentry_settime(struct dentry *entry, u64 time)
{
	entry->d_time = time;
	entry->d_fsdata = (void *) (unsigned long) (time >> 32);
}

static u64 fuse_dentry_time(struct dentry *entry)
{
	return (u64) entry->d_time +
		((u64) (unsigned long) entry->d_fsdata << 32);
}
#endif
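
/*
 * Example: for a 64-bit timeout value of 0x123456789, the 32-bit
 * variant above stores d_time = 0x23456789 and d_fsdata = (void *) 0x1,
 * and fuse_dentry_time() reassembles the original value from the two
 * halves.
 */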

/*
 * FUSE caches dentries and attributes with separate timeout.  The
 * time in jiffies until the dentry/attributes are valid is stored in
 * dentry->d_time and fuse_inode->i_time respectively.
 */

/*
 * Calculate the time in jiffies until a dentry/attributes are valid
 */
static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
{
	if (sec || nsec) {
		struct timespec ts = {sec, nsec};
		return get_jiffies_64() + timespec_to_jiffies(&ts);
	} else
		return 0;
}
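
/*
 * A zero timeout from the server makes time_to_jiffies() return 0,
 * which always compares as already expired against get_jiffies_64(),
 * so caching is effectively disabled for that entry or its attributes.
 */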

/*
 * Set dentry and possibly attribute timeouts from the lookup/mk*
 * replies
 */
static void fuse_change_entry_timeout(struct dentry *entry,
				      struct fuse_entry_out *o)
{
	fuse_dentry_settime(entry,
		time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
}

static u64 attr_timeout(struct fuse_attr_out *o)
{
	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}

static u64 entry_attr_timeout(struct fuse_entry_out *o)
{
	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}

/*
 * Mark the attributes as stale, so that at the next call to
 * ->getattr() they will be fetched from userspace
 */
void fuse_invalidate_attr(struct inode *inode)
{
	get_fuse_inode(inode)->i_time = 0;
}

/*
 * Just mark the entry as stale, so that a next attempt to look it up
 * will result in a new lookup call to userspace
 *
 * This is called when a dentry is about to become negative and the
 * timeout is unknown (unlink, rmdir, rename and in some cases
 * lookup)
 */
void fuse_invalidate_entry_cache(struct dentry *entry)
{
	fuse_dentry_settime(entry, 0);
}

/*
 * Same as fuse_invalidate_entry_cache(), but also try to remove the
 * dentry from the hash
 */
static void fuse_invalidate_entry(struct dentry *entry)
{
	d_invalidate(entry);
	fuse_invalidate_entry_cache(entry);
}

static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
			     u64 nodeid, struct qstr *name,
			     struct fuse_entry_out *outarg)
{
	memset(outarg, 0, sizeof(struct fuse_entry_out));
	req->in.h.opcode = FUSE_LOOKUP;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = name->len + 1;
	req->in.args[0].value = name->name;
	req->out.numargs = 1;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(struct fuse_entry_out);
	req->out.args[0].value = outarg;
}

u64 fuse_get_attr_version(struct fuse_conn *fc)
{
	u64 curr_version;

	/*
	 * The spin lock isn't actually needed on 64bit archs, but we
	 * don't yet care too much about such optimizations.
	 */
	spin_lock(&fc->lock);
	curr_version = fc->attr_version;
	spin_unlock(&fc->lock);

	return curr_version;
}
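
/*
 * Callers sample the attribute version before sending a request and
 * pass the snapshot to fuse_change_attributes(), which uses it to
 * discard replies whose attributes were already superseded by a newer
 * update while the request was in flight.
 */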

/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup.  If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more.  If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 */
static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
	struct inode *inode;

	inode = ACCESS_ONCE(entry->d_inode);
	if (inode && is_bad_inode(inode))
		return 0;
	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
		int err;
		struct fuse_entry_out outarg;
		struct fuse_conn *fc;
		struct fuse_req *req;
		struct fuse_forget_link *forget;
		struct dentry *parent;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			return 0;

		if (nd && (nd->flags & LOOKUP_RCU))
			return -ECHILD;

		fc = get_fuse_conn(inode);
		req = fuse_get_req(fc);
		if (IS_ERR(req))
			return 0;

		forget = fuse_alloc_forget();
		if (!forget) {
			fuse_put_request(fc, req);
			return 0;
		}

		attr_version = fuse_get_attr_version(fc);

		parent = dget_parent(entry);
		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
				 &entry->d_name, &outarg);
		fuse_request_send(fc, req);
		dput(parent);
		err = req->out.h.error;
		fuse_put_request(fc, req);
		/* Zero nodeid is same as -ENOENT */
		if (!err && !outarg.nodeid)
			err = -ENOENT;
		if (!err) {
			struct fuse_inode *fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode)) {
				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
				return 0;
			}
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);
		}
		kfree(forget);
		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
			return 0;

		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	}
	return 1;
}

static int invalid_nodeid(u64 nodeid)
{
	return !nodeid || nodeid == FUSE_ROOT_ID;
}

const struct dentry_operations fuse_dentry_operations = {
	.d_revalidate = fuse_dentry_revalidate,
};

int fuse_valid_type(int m)
{
	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
		S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}

/*
 * Add a directory inode to a dentry, ensuring that no other dentry
 * refers to this inode.  Called with fc->inst_mutex.
 */
static struct dentry *fuse_d_add_directory(struct dentry *entry,
					   struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);
	if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
		/* This tries to shrink the subtree below alias */
		fuse_invalidate_entry(alias);
		dput(alias);
		if (!list_empty(&inode->i_dentry))
			return ERR_PTR(-EBUSY);
	} else {
		dput(alias);
	}
	return d_splice_alias(inode, entry);
}

int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
		     struct fuse_entry_out *outarg, struct inode **inode)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct fuse_req *req;
	struct fuse_forget_link *forget;
	u64 attr_version;
	int err;

	*inode = NULL;
	err = -ENAMETOOLONG;
	if (name->len > FUSE_NAME_MAX)
		goto out;

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget) {
		fuse_put_request(fc, req);
		goto out;
	}

	attr_version = fuse_get_attr_version(fc);

	fuse_lookup_init(fc, req, nodeid, name, outarg);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	/* Zero nodeid is same as -ENOENT, but with valid timeout */
	if (err || !outarg->nodeid)
		goto out_put_forget;

	err = -EIO;
	if (!outarg->nodeid)
		goto out_put_forget;
	if (!fuse_valid_type(outarg->attr.mode))
		goto out_put_forget;

	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
			   &outarg->attr, entry_attr_timeout(outarg),
			   attr_version);
	err = -ENOMEM;
	if (!*inode) {
		fuse_queue_forget(fc, forget, outarg->nodeid, 1);
		goto out;
	}
	err = 0;

 out_put_forget:
	kfree(forget);
 out:
	return err;
}
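
/*
 * Note on the forget link: a successful FUSE_LOOKUP reply takes one
 * nlookup reference on the nodeid.  If the inode cannot be set up
 * (fuse_iget() failure above), that reference is returned to the
 * filesystem via fuse_queue_forget(); otherwise the preallocated
 * forget link is simply freed.
 */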

static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
				  struct nameidata *nd)
{
	int err;
	struct fuse_entry_out outarg;
	struct inode *inode;
	struct dentry *newent;
	struct fuse_conn *fc = get_fuse_conn(dir);
	bool outarg_valid = true;

	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
			       &outarg, &inode);
	if (err == -ENOENT) {
		outarg_valid = false;
		err = 0;
	}
	if (err)
		goto out_err;

	err = -EIO;
	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
		goto out_iput;

	if (inode && S_ISDIR(inode->i_mode)) {
		mutex_lock(&fc->inst_mutex);
		newent = fuse_d_add_directory(entry, inode);
		mutex_unlock(&fc->inst_mutex);
		err = PTR_ERR(newent);
		if (IS_ERR(newent))
			goto out_iput;
	} else {
		newent = d_splice_alias(inode, entry);
	}

	entry = newent ? newent : entry;
	if (outarg_valid)
		fuse_change_entry_timeout(entry, &outarg);
	else
		fuse_invalidate_entry_cache(entry);

	return newent;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}

/*
 * Atomic create+open operation
 *
 * If the filesystem doesn't support this, then fall back to separate
 * 'mknod' + 'open' requests.
 */
static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
			    struct nameidata *nd)
{
	int err;
	struct inode *inode;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req;
	struct fuse_forget_link *forget;
	struct fuse_create_in inarg;
	struct fuse_open_out outopen;
	struct fuse_entry_out outentry;
	struct fuse_file *ff;
	struct file *file;
	int flags = nd->intent.open.flags;

	if (fc->no_create)
		return -ENOSYS;

	if (flags & O_DIRECT)
		return -EINVAL;

	forget = fuse_alloc_forget();
	if (!forget)
		return -ENOMEM;

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out_put_forget_req;

	err = -ENOMEM;
	ff = fuse_file_alloc(fc);
	if (!ff)
		goto out_put_request;

	if (!fc->dont_mask)
		mode &= ~current_umask();

	flags &= ~O_NOCTTY;
	memset(&inarg, 0, sizeof(inarg));
	memset(&outentry, 0, sizeof(outentry));
	inarg.flags = flags;
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_CREATE;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 2;
	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	req->out.numargs = 2;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outentry);
	req->out.args[0].value = &outentry;
	req->out.args[1].size = sizeof(outopen);
	req->out.args[1].value = &outopen;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err) {
		if (err == -ENOSYS)
			fc->no_create = 1;
		goto out_free_ff;
	}

	err = -EIO;
	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
		goto out_free_ff;

	fuse_put_request(fc, req);
	ff->fh = outopen.fh;
	ff->nodeid = outentry.nodeid;
	ff->open_flags = outopen.open_flags;
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr, entry_attr_timeout(&outentry), 0);
	if (!inode) {
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		fuse_sync_release(ff, flags);
		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);
	d_instantiate(entry, inode);
	fuse_change_entry_timeout(entry, &outentry);
	fuse_invalidate_attr(dir);
	file = lookup_instantiate_filp(nd, entry, generic_file_open);
	if (IS_ERR(file)) {
		fuse_sync_release(ff, flags);
		return PTR_ERR(file);
	}
	file->private_data = fuse_file_get(ff);
	fuse_finish_open(inode, file);
	return 0;

 out_free_ff:
	fuse_file_free(ff);
 out_put_request:
	fuse_put_request(fc, req);
 out_put_forget_req:
	kfree(forget);
	return err;
}

/*
 * Code shared between mknod, mkdir, symlink and link
 */
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
			    struct inode *dir, struct dentry *entry,
			    int mode)
{
	struct fuse_entry_out outarg;
	struct inode *inode;
	int err;
	struct fuse_forget_link *forget;

	forget = fuse_alloc_forget();
	if (!forget) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}

	memset(&outarg, 0, sizeof(outarg));
	req->in.h.nodeid = get_node_id(dir);
	req->out.numargs = 1;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err)
		goto out_put_forget_req;

	err = -EIO;
	if (invalid_nodeid(outarg.nodeid))
		goto out_put_forget_req;

	if ((outarg.attr.mode ^ mode) & S_IFMT)
		goto out_put_forget_req;

	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
			  &outarg.attr, entry_attr_timeout(&outarg), 0);
	if (!inode) {
		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;
		mutex_lock(&fc->inst_mutex);
		alias = d_find_alias(inode);
		if (alias) {
			/* New directory must have moved since mkdir */
			mutex_unlock(&fc->inst_mutex);
			dput(alias);
			iput(inode);
			return -EBUSY;
		}
		d_instantiate(entry, inode);
		mutex_unlock(&fc->inst_mutex);
	} else
		d_instantiate(entry, inode);

	fuse_change_entry_timeout(entry, &outarg);
	fuse_invalidate_attr(dir);
	return 0;

 out_put_forget_req:
	kfree(forget);
	return err;
}

static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
		      dev_t rdev)
{
	struct fuse_mknod_in inarg;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.rdev = new_encode_dev(rdev);
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_MKNOD;
	req->in.numargs = 2;
	req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	return create_new_entry(fc, req, dir, entry, mode);
}

static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
		       struct nameidata *nd)
{
	if (nd) {
		int err = fuse_create_open(dir, entry, mode, nd);
		if (err != -ENOSYS)
			return err;
		/* Fall back on mknod */
	}
	return fuse_mknod(dir, entry, mode, 0);
}

static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
{
	struct fuse_mkdir_in inarg;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_MKDIR;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	return create_new_entry(fc, req, dir, entry, S_IFDIR);
}

static int fuse_symlink(struct inode *dir, struct dentry *entry,
			const char *link)
{
	struct fuse_conn *fc = get_fuse_conn(dir);
	unsigned len = strlen(link) + 1;
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_SYMLINK;
	req->in.numargs = 2;
	req->in.args[0].size = entry->d_name.len + 1;
	req->in.args[0].value = entry->d_name.name;
	req->in.args[1].size = len;
	req->in.args[1].value = link;
	return create_new_entry(fc, req, dir, entry, S_IFLNK);
}

static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_UNLINK;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 1;
	req->in.args[0].size = entry->d_name.len + 1;
	req->in.args[0].value = entry->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		struct inode *inode = entry->d_inode;

		/*
		 * Set nlink to zero so the inode can be cleared; if the
		 * inode does have more links, this will be discovered at
		 * the next lookup/getattr.
		 */
		clear_nlink(inode);
		fuse_invalidate_attr(inode);
		fuse_invalidate_attr(dir);
		fuse_invalidate_entry_cache(entry);
	} else if (err == -EINTR)
		fuse_invalidate_entry(entry);
	return err;
}

static int fuse_rmdir(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_RMDIR;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 1;
	req->in.args[0].size = entry->d_name.len + 1;
	req->in.args[0].value = entry->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		clear_nlink(entry->d_inode);
		fuse_invalidate_attr(dir);
		fuse_invalidate_entry_cache(entry);
	} else if (err == -EINTR)
		fuse_invalidate_entry(entry);
	return err;
}

static int fuse_rename(struct inode *olddir, struct dentry *oldent,
		       struct inode *newdir, struct dentry *newent)
{
	int err;
	struct fuse_rename_in inarg;
	struct fuse_conn *fc = get_fuse_conn(olddir);
	struct fuse_req *req = fuse_get_req(fc);

	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.newdir = get_node_id(newdir);
	req->in.h.opcode = FUSE_RENAME;
	req->in.h.nodeid = get_node_id(olddir);
	req->in.numargs = 3;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = oldent->d_name.len + 1;
	req->in.args[1].value = oldent->d_name.name;
	req->in.args[2].size = newent->d_name.len + 1;
	req->in.args[2].value = newent->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		/* ctime changes */
		fuse_invalidate_attr(oldent->d_inode);

		fuse_invalidate_attr(olddir);
		if (olddir != newdir)
			fuse_invalidate_attr(newdir);

		/* newent will end up negative */
		if (newent->d_inode) {
			fuse_invalidate_attr(newent->d_inode);
			fuse_invalidate_entry_cache(newent);
		}
	} else if (err == -EINTR) {
		/* If request was interrupted, DEITY only knows if the
		   rename actually took place.  If the invalidation
		   fails (e.g. some process has CWD under the renamed
		   directory), then there can be inconsistency between
		   the dcache and the real filesystem.  Tough luck. */
		fuse_invalidate_entry(oldent);
		if (newent->d_inode)
			fuse_invalidate_entry(newent);
	}

	return err;
}

static int fuse_link(struct dentry *entry, struct inode *newdir,
		     struct dentry *newent)
{
	int err;
	struct fuse_link_in inarg;
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.oldnodeid = get_node_id(inode);
	req->in.h.opcode = FUSE_LINK;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = newent->d_name.len + 1;
	req->in.args[1].value = newent->d_name.name;
	err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
	/* Contrary to "normal" filesystems it can happen that link
	   makes two "logical" inodes point to the same "physical"
	   inode.  We invalidate the attributes of the old one, so it
	   will reflect changes in the backing inode (link count,
	   etc.)
	*/
	if (!err || err == -EINTR)
		fuse_invalidate_attr(inode);
	return err;
}

static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
			  struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = attr->ino;
	stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	stat->nlink = attr->nlink;
	stat->uid = attr->uid;
	stat->gid = attr->gid;
	stat->rdev = inode->i_rdev;
	stat->atime.tv_sec = attr->atime;
	stat->atime.tv_nsec = attr->atimensec;
	stat->mtime.tv_sec = attr->mtime;
	stat->mtime.tv_nsec = attr->mtimensec;
	stat->ctime.tv_sec = attr->ctime;
	stat->ctime.tv_nsec = attr->ctimensec;
	stat->size = attr->size;
	stat->blocks = attr->blocks;
	stat->blksize = (1 << inode->i_blkbits);
}
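
/*
 * The file type bits are taken from the cached inode rather than from
 * the server's reply: if the type had changed, fuse_do_getattr() below
 * would already have marked the inode bad instead of refreshing it.
 */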

static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
			   struct file *file)
{
	int err;
	struct fuse_getattr_in inarg;
	struct fuse_attr_out outarg;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	u64 attr_version;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_version = fuse_get_attr_version(fc);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	/* Directories have separate file-handle space */
	if (file && S_ISREG(inode->i_mode)) {
		struct fuse_file *ff = file->private_data;

		inarg.getattr_flags |= FUSE_GETATTR_FH;
		inarg.fh = ff->fh;
	}
	req->in.h.opcode = FUSE_GETATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
			make_bad_inode(inode);
			err = -EIO;
		} else {
			fuse_change_attributes(inode, &outarg.attr,
					       attr_timeout(&outarg),
					       attr_version);
			if (stat)
				fuse_fillattr(inode, &outarg.attr, stat);
		}
	}
	return err;
}

int fuse_update_attributes(struct inode *inode, struct kstat *stat,
			   struct file *file, bool *refreshed)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err;
	bool r;

	if (fi->i_time < get_jiffies_64()) {
		r = true;
		err = fuse_do_getattr(inode, stat, file);
	} else {
		r = false;
		err = 0;
		if (stat) {
			generic_fillattr(inode, stat);
			stat->mode = fi->orig_i_mode;
		}
	}

	if (refreshed != NULL)
		*refreshed = r;

	return err;
}

int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
			     struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
	if (!parent)
		return -ENOENT;

	mutex_lock(&parent->i_mutex);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_invalidate_attr(parent);
	fuse_invalidate_entry(entry);
	dput(entry);
	err = 0;

 unlock:
	mutex_unlock(&parent->i_mutex);
	iput(parent);
	return err;
}

/*
 * Calling into a user-controlled filesystem gives the filesystem
 * daemon ptrace-like capabilities over the requester process.  This
 * means that the filesystem daemon is able to record the exact
 * filesystem operations performed, and can also control the behavior
 * of the requester process in otherwise impossible ways.  For example
 * it can delay the operation for an arbitrary length of time, allowing
 * DoS against the requester.
 *
 * For this reason only processes for which the owner of the mount has
 * ptrace privilege may call into the filesystem.  This excludes
 * processes started by other users, and suid or sgid processes.
 */
int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
{
	const struct cred *cred;
	int ret;

	if (fc->flags & FUSE_ALLOW_OTHER)
		return 1;

	rcu_read_lock();
	ret = 0;
	cred = __task_cred(task);
	if (cred->euid == fc->user_id &&
	    cred->suid == fc->user_id &&
	    cred->uid == fc->user_id &&
	    cred->egid == fc->group_id &&
	    cred->sgid == fc->group_id &&
	    cred->gid == fc->group_id)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
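
/*
 * In practice this means that, without "allow_other", even a root-owned
 * process is refused access to another user's mount: every uid and gid
 * of the caller must match the mount owner exactly.
 */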

static int fuse_access(struct inode *inode, int mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_access_in inarg;
	int err;

	if (fc->no_access)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
	req->in.h.opcode = FUSE_ACCESS;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_access = 1;
		err = 0;
	}
	return err;
}

static int fuse_perm_getattr(struct inode *inode, int mask)
{
	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	return fuse_do_getattr(inode, NULL, NULL);
}

/*
 * Check permission.  The two basic access models of FUSE are:
 *
 * 1) Local access checking ('default_permissions' mount option) based
 * on file mode.  This is the plain old disk filesystem permission
 * model.
 *
 * 2) "Remote" access checking, where the server is responsible for
 * checking permission in each inode operation.  An exception to this
 * is if ->permission() was invoked from sys_access(), in which case an
 * access request is sent.  Execute permission is still checked
 * locally based on file mode.
 */
static int fuse_permission(struct inode *inode, int mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	bool refreshed = false;
	int err = 0;

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	/*
	 * If attributes are needed, refresh them before proceeding
	 */
	if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		if (fi->i_time < get_jiffies_64()) {
			refreshed = true;

			err = fuse_perm_getattr(inode, mask);
			if (err)
				return err;
		}
	}

	if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
		err = generic_permission(inode, mask);

		/* If permission is denied, try to refresh file
		   attributes.  This is also needed, because the root
		   node will at first have no permissions */
		if (err == -EACCES && !refreshed) {
			err = fuse_perm_getattr(inode, mask);
			if (!err)
				err = generic_permission(inode, mask);
		}

		/* Note: the opposite of the above test does not
		   exist.  So if permissions are revoked this won't be
		   noticed immediately, only after the attribute
		   timeout has expired */
	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
		if (mask & MAY_NOT_BLOCK)
			return -ECHILD;

		err = fuse_access(inode, mask);
	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
		if (!(inode->i_mode & S_IXUGO)) {
			if (refreshed)
				return -EACCES;

			err = fuse_perm_getattr(inode, mask);
			if (!err && !(inode->i_mode & S_IXUGO))
				return -EACCES;
		}
	}
	return err;
}

static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
			 void *dstbuf, filldir_t filldir)
{
	while (nbytes >= FUSE_NAME_OFFSET) {
		struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
		size_t reclen = FUSE_DIRENT_SIZE(dirent);
		int over;
		if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
			return -EIO;
		if (reclen > nbytes)
			break;

		over = filldir(dstbuf, dirent->name, dirent->namelen,
			       file->f_pos, dirent->ino, dirent->type);
		if (over)
			break;

		buf += reclen;
		nbytes -= reclen;
		file->f_pos = dirent->off;
	}

	return 0;
}
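
/*
 * Each record in a FUSE_READDIR reply is a struct fuse_dirent header
 * (ino, off, namelen, type) followed by the name; FUSE_DIRENT_SIZE()
 * rounds the total up to 8-byte alignment, which is why the buffer is
 * walked in reclen steps rather than by namelen.
 */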

static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
{
	int err;
	size_t nbytes;
	struct page *page;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR);
	fuse_request_send(fc, req);
	nbytes = req->out.args[0].size;
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = parse_dirfile(page_address(page), nbytes, file, dstbuf,
				    filldir);

	__free_page(page);
	fuse_invalidate_attr(inode); /* atime changed */
	return err;
}

static char *read_link(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req = fuse_get_req(fc);
	char *link;

	if (IS_ERR(req))
		return ERR_CAST(req);

	link = (char *) __get_free_page(GFP_KERNEL);
	if (!link) {
		link = ERR_PTR(-ENOMEM);
		goto out;
	}
	req->in.h.opcode = FUSE_READLINK;
	req->in.h.nodeid = get_node_id(inode);
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = PAGE_SIZE - 1;
	req->out.args[0].value = link;
	fuse_request_send(fc, req);
	if (req->out.h.error) {
		free_page((unsigned long) link);
		link = ERR_PTR(req->out.h.error);
	} else
		link[req->out.args[0].size] = '\0';
 out:
	fuse_put_request(fc, req);
	fuse_invalidate_attr(inode); /* atime changed */
	return link;
}

static void free_link(char *link)
{
	if (!IS_ERR(link))
		free_page((unsigned long) link);
}

static void *fuse_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, read_link(dentry));
	return NULL;
}

static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
{
	free_link(nd_get_link(nd));
}

static int fuse_dir_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, true);
}

static int fuse_dir_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASEDIR);

	return 0;
}

static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 1);
}

static bool update_mtime(unsigned ivalid)
{
	/* Always update if mtime is explicitly set */
	if (ivalid & ATTR_MTIME_SET)
		return true;

	/* If it's an open(O_TRUNC) or an ftruncate(), don't update */
	if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
		return false;

	/* In all other cases update */
	return true;
}

static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
{
	unsigned ivalid = iattr->ia_valid;

	if (ivalid & ATTR_MODE)
		arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
	if (ivalid & ATTR_UID)
		arg->valid |= FATTR_UID, arg->uid = iattr->ia_uid;
	if (ivalid & ATTR_GID)
		arg->valid |= FATTR_GID, arg->gid = iattr->ia_gid;
	if (ivalid & ATTR_SIZE)
		arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
	if (ivalid & ATTR_ATIME) {
		arg->valid |= FATTR_ATIME;
		arg->atime = iattr->ia_atime.tv_sec;
		arg->atimensec = iattr->ia_atime.tv_nsec;
		if (!(ivalid & ATTR_ATIME_SET))
			arg->valid |= FATTR_ATIME_NOW;
	}
	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid)) {
		arg->valid |= FATTR_MTIME;
		arg->mtime = iattr->ia_mtime.tv_sec;
		arg->mtimensec = iattr->ia_mtime.tv_nsec;
		if (!(ivalid & ATTR_MTIME_SET))
			arg->valid |= FATTR_MTIME_NOW;
	}
}

/*
 * Prevent concurrent writepages on inode
 *
 * This is done by adding a negative bias to the inode write counter
 * and waiting for all pending writes to finish.
 */
void fuse_set_nowrite(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	BUG_ON(!mutex_is_locked(&inode->i_mutex));

	spin_lock(&fc->lock);
	BUG_ON(fi->writectr < 0);
	fi->writectr += FUSE_NOWRITE;
	spin_unlock(&fc->lock);
	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
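
/*
 * FUSE_NOWRITE is a large negative bias, so writectr stays negative
 * while it is applied; the wait above completes exactly when every
 * write that was in flight at the time of the call has finished, since
 * each completion brings writectr back toward the bias.
 */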

/*
 * Allow writepages on inode
 *
 * Remove the bias from the writecounter and send any queued
 * writepages.
 */
static void __fuse_release_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	BUG_ON(fi->writectr != FUSE_NOWRITE);
	fi->writectr = 0;
	fuse_flush_writepages(inode);
}

void fuse_release_nowrite(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	spin_lock(&fc->lock);
	__fuse_release_nowrite(inode);
	spin_unlock(&fc->lock);
}

/*
 * Set attributes, and at the same time refresh them.
 *
 * Truncation is slightly complicated, because the 'truncate' request
 * may fail, in which case we don't want to touch the mapping.
 * vmtruncate() doesn't allow for this case, so do the rlimit checking
 * and the actual truncation by hand.
 */
static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
			   struct file *file)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_setattr_in inarg;
	struct fuse_attr_out outarg;
	bool is_truncate = false;
	loff_t oldsize;
	int err;

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
		attr->ia_valid |= ATTR_FORCE;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_OPEN) {
		if (fc->atomic_o_trunc)
			return 0;
		file = NULL;
	}

	if (attr->ia_valid & ATTR_SIZE)
		is_truncate = true;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (is_truncate)
		fuse_set_nowrite(inode);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	iattr_to_fattr(attr, &inarg);
	if (file) {
		struct fuse_file *ff = file->private_data;
		inarg.valid |= FATTR_FH;
		inarg.fh = ff->fh;
	}
	if (attr->ia_valid & ATTR_SIZE) {
		/* For mandatory locking in truncate */
		inarg.valid |= FATTR_LOCKOWNER;
		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
	}
	req->in.h.opcode = FUSE_SETATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err) {
		if (err == -EINTR)
			fuse_invalidate_attr(inode);
		goto error;
	}

	if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
		make_bad_inode(inode);
		err = -EIO;
		goto error;
	}

	spin_lock(&fc->lock);
	fuse_change_attributes_common(inode, &outarg.attr,
				      attr_timeout(&outarg));
	oldsize = inode->i_size;
	i_size_write(inode, outarg.attr.size);

	if (is_truncate) {
		/* NOTE: this may release/reacquire fc->lock */
		__fuse_release_nowrite(inode);
	}
	spin_unlock(&fc->lock);

	/*
	 * Only call invalidate_inode_pages2() after removing
	 * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
	 */
	if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
		truncate_pagecache(inode, oldsize, outarg.attr.size);
		invalidate_inode_pages2(inode->i_mapping);
	}

	return 0;

error:
	if (is_truncate)
		fuse_release_nowrite(inode);

	return err;
}

static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
	if (attr->ia_valid & ATTR_FILE)
		return fuse_do_setattr(entry, attr, attr->ia_file);
	else
		return fuse_do_setattr(entry, attr, NULL);
}

static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
			struct kstat *stat)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	return fuse_update_attributes(inode, stat, NULL, NULL);
}

static int fuse_setxattr(struct dentry *entry, const char *name,
			 const void *value, size_t size, int flags)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_setxattr_in inarg;
	int err;

	if (fc->no_setxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	inarg.flags = flags;
	req->in.h.opcode = FUSE_SETXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 3;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = strlen(name) + 1;
	req->in.args[1].value = name;
	req->in.args[2].size = size;
	req->in.args[2].value = value;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_setxattr = 1;
		err = -EOPNOTSUPP;
	}
	return err;
}

static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
			     void *value, size_t size)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_getxattr_in inarg;
	struct fuse_getxattr_out outarg;
	ssize_t ret;

	if (fc->no_getxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	req->in.h.opcode = FUSE_GETXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = strlen(name) + 1;
	req->in.args[1].value = name;
	/* This is really two different operations rolled into one */
	req->out.numargs = 1;
	if (size) {
		req->out.argvar = 1;
		req->out.args[0].size = size;
		req->out.args[0].value = value;
	} else {
		req->out.args[0].size = sizeof(outarg);
		req->out.args[0].value = &outarg;
	}
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret)
		ret = size ? req->out.args[0].size : outarg.size;
	else {
		if (ret == -ENOSYS) {
			fc->no_getxattr = 1;
			ret = -EOPNOTSUPP;
		}
	}
	fuse_put_request(fc, req);
	return ret;
}

static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_getxattr_in inarg;
	struct fuse_getxattr_out outarg;
	ssize_t ret;

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (fc->no_listxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	req->in.h.opcode = FUSE_LISTXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	/* This is really two different operations rolled into one */
	req->out.numargs = 1;
	if (size) {
		req->out.argvar = 1;
		req->out.args[0].size = size;
		req->out.args[0].value = list;
	} else {
		req->out.args[0].size = sizeof(outarg);
		req->out.args[0].value = &outarg;
	}
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret)
		ret = size ? req->out.args[0].size : outarg.size;
	else {
		if (ret == -ENOSYS) {
			fc->no_listxattr = 1;
			ret = -EOPNOTSUPP;
		}
	}
	fuse_put_request(fc, req);
	return ret;
}

static int fuse_removexattr(struct dentry *entry, const char *name)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;

	if (fc->no_removexattr)
		return -EOPNOTSUPP;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_REMOVEXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = strlen(name) + 1;
	req->in.args[0].value = name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_removexattr = 1;
		err = -EOPNOTSUPP;
	}
	return err;
}

static const struct inode_operations fuse_dir_inode_operations = {
	.lookup = fuse_lookup,
	.mkdir = fuse_mkdir,
	.symlink = fuse_symlink,
	.unlink = fuse_unlink,
	.rmdir = fuse_rmdir,
	.rename = fuse_rename,
	.link = fuse_link,
	.setattr = fuse_setattr,
	.create = fuse_create,
	.mknod = fuse_mknod,
	.permission = fuse_permission,
	.getattr = fuse_getattr,
	.setxattr = fuse_setxattr,
	.getxattr = fuse_getxattr,
	.listxattr = fuse_listxattr,
	.removexattr = fuse_removexattr,
};

static const struct file_operations fuse_dir_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.readdir = fuse_readdir,
	.open = fuse_dir_open,
	.release = fuse_dir_release,
	.fsync = fuse_dir_fsync,
};

static const struct inode_operations fuse_common_inode_operations = {
	.setattr = fuse_setattr,
	.permission = fuse_permission,
	.getattr = fuse_getattr,
	.setxattr = fuse_setxattr,
	.getxattr = fuse_getxattr,
	.listxattr = fuse_listxattr,
	.removexattr = fuse_removexattr,
};

static const struct inode_operations fuse_symlink_inode_operations = {
	.setattr = fuse_setattr,
	.follow_link = fuse_follow_link,
	.put_link = fuse_put_link,
	.readlink = generic_readlink,
	.getattr = fuse_getattr,
	.setxattr = fuse_setxattr,
	.getxattr = fuse_getxattr,
	.listxattr = fuse_listxattr,
	.removexattr = fuse_removexattr,
};

void fuse_init_common(struct inode *inode)
{
	inode->i_op = &fuse_common_inode_operations;
}

void fuse_init_dir(struct inode *inode)
{
	inode->i_op = &fuse_dir_inode_operations;
	inode->i_fop = &fuse_dir_operations;
}

void fuse_init_symlink(struct inode *inode)
{
	inode->i_op = &fuse_symlink_inode_operations;
}