namei.c: let follow_link() do put_link() on failure
[mirror_ubuntu-focal-kernel.git] / fs / namei.c
1 /*
2 * linux/fs/namei.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 /*
8 * Some corrections by tytso.
9 */
10
11 /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
12 * lookup logic.
13 */
14 /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
15 */
16
17 #include <linux/init.h>
18 #include <linux/export.h>
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/fs.h>
22 #include <linux/namei.h>
23 #include <linux/pagemap.h>
24 #include <linux/fsnotify.h>
25 #include <linux/personality.h>
26 #include <linux/security.h>
27 #include <linux/ima.h>
28 #include <linux/syscalls.h>
29 #include <linux/mount.h>
30 #include <linux/audit.h>
31 #include <linux/capability.h>
32 #include <linux/file.h>
33 #include <linux/fcntl.h>
34 #include <linux/device_cgroup.h>
35 #include <linux/fs_struct.h>
36 #include <linux/posix_acl.h>
37 #include <asm/uaccess.h>
38
39 #include "internal.h"
40 #include "mount.h"
41
42 /* [Feb-1997 T. Schoebel-Theuer]
43 * Fundamental changes in the pathname lookup mechanisms (namei)
44 * were necessary because of omirr. The reason is that omirr needs
45 * to know the _real_ pathname, not the user-supplied one, in case
46 * of symlinks (and also when transname replacements occur).
47 *
48 * The new code replaces the old recursive symlink resolution with
49 * an iterative one (in case of non-nested symlink chains). It does
50 * this with calls to <fs>_follow_link().
51 * As a side effect, dir_namei(), _namei() and follow_link() are now
52 * replaced with a single function lookup_dentry() that can handle all
53 * the special cases of the former code.
54 *
55 * With the new dcache, the pathname is stored at each inode, at least as
56 * long as the refcount of the inode is positive. As a side effect, the
57 * size of the dcache depends on the inode cache and thus is dynamic.
58 *
59 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
60 * resolution to correspond with current state of the code.
61 *
62 * Note that the symlink resolution is not *completely* iterative.
63 * There is still a significant amount of tail- and mid- recursion in
64 * the algorithm. Also, note that <fs>_readlink() is not used in
65 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
66 * may return different results than <fs>_follow_link(). Many virtual
67 * filesystems (including /proc) exhibit this behavior.
68 */
69
70 /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
71 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
72 * and the name already exists in form of a symlink, try to create the new
73 * name indicated by the symlink. The old code always complained that the
74 * name already exists, due to not following the symlink even if its target
75 * is nonexistent. The new semantics also affects mknod() and link() when
76 * the name is a symlink pointing to a non-existent name.
77 *
78 * I don't know which semantics is the right one, since I have no access
79 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
80 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
81 * "old" one. Personally, I think the new semantics is much more logical.
82 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
83 * file does succeed in both HP-UX and SunOS, but not in Solaris
84 * or under the old Linux semantics.
85 */
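/*
 * A minimal standalone userspace sketch of the difference described above:
 * make a symlink to a non-existent target, then try O_CREAT | O_EXCL
 * through it and report whichever semantics the running kernel applies.
 * The "probe_link"/"probe_target" names are placeholders.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		unlink("probe_link");
 *		unlink("probe_target");
 *		if (symlink("probe_target", "probe_link"))	// dangling link
 *			return 1;
 *		fd = open("probe_link", O_CREAT | O_EXCL | O_WRONLY, 0644);
 *		if (fd < 0)	// "old" semantics: the name counts as existing
 *			printf("open: %s\n", strerror(errno));
 *		else		// "new" semantics: the target was created
 *			printf("created probe_target, fd %d\n", fd);
 *		return 0;
 *	}
 */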
86
87 /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
88 * semantics. See the comments in "open_namei" and "do_link" below.
89 *
90 * [10-Sep-98 Alan Modra] Another symlink change.
91 */
92
93 /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
94 * inside the path - always follow.
95 * in the last component in creation/removal/renaming - never follow.
96 * if LOOKUP_FOLLOW passed - follow.
97 * if the pathname has trailing slashes - follow.
98 * otherwise - don't follow.
99 * (applied in that order).
100 *
101 * [Jun 2000 AV] Inconsistent behaviour of open() in the case of flags==O_CREAT
102 * restored for 2.4. This is the last surviving part of the old 4.2BSD bug.
103 * During the 2.4 we need to fix the userland stuff depending on it -
104 * hopefully we will be able to get rid of that wart in 2.5. So far only
105 * XEmacs seems to be relying on it...
106 */
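/*
 * The trailing-slash rule above can be observed from userspace with a
 * standalone sketch like this one ("d" and "l" are placeholder names):
 * lstat() normally reports the final symlink itself, but a trailing slash
 * forces it to be followed.
 *
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct stat st;
 *
 *		mkdir("d", 0755);
 *		symlink("d", "l");
 *		if (!lstat("l", &st))
 *			printf("l  : %s\n", S_ISLNK(st.st_mode) ? "symlink" : "followed");
 *		if (!lstat("l/", &st))
 *			printf("l/ : %s\n", S_ISDIR(st.st_mode) ? "directory" : "symlink");
 *		return 0;
 *	}
 */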
107 /*
108 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
109 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
110 * any extra contention...
111 */
112
113 /* In order to reduce some races, while at the same time doing additional
114 * checking and hopefully speeding things up, we copy filenames to the
115 * kernel data space before using them..
116 *
117 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
118 * PATH_MAX includes the nul terminator --RR.
119 */
120 static char *getname_flags(const char __user *filename, int flags, int *empty)
121 {
122 char *result = __getname(), *err;
123 int len;
124
125 if (unlikely(!result))
126 return ERR_PTR(-ENOMEM);
127
128 len = strncpy_from_user(result, filename, PATH_MAX);
129 err = ERR_PTR(len);
130 if (unlikely(len < 0))
131 goto error;
132
133 /* The empty path is special. */
134 if (unlikely(!len)) {
135 if (empty)
136 *empty = 1;
137 err = ERR_PTR(-ENOENT);
138 if (!(flags & LOOKUP_EMPTY))
139 goto error;
140 }
141
142 err = ERR_PTR(-ENAMETOOLONG);
143 if (likely(len < PATH_MAX)) {
144 audit_getname(result);
145 return result;
146 }
147
148 error:
149 __putname(result);
150 return err;
151 }
152
153 char *getname(const char __user * filename)
154 {
155 return getname_flags(filename, 0, NULL);
156 }
157
158 #ifdef CONFIG_AUDITSYSCALL
159 void putname(const char *name)
160 {
161 if (unlikely(!audit_dummy_context()))
162 audit_putname(name);
163 else
164 __putname(name);
165 }
166 EXPORT_SYMBOL(putname);
167 #endif
168
169 static int check_acl(struct inode *inode, int mask)
170 {
171 #ifdef CONFIG_FS_POSIX_ACL
172 struct posix_acl *acl;
173
174 if (mask & MAY_NOT_BLOCK) {
175 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
176 if (!acl)
177 return -EAGAIN;
178 /* no ->get_acl() calls in RCU mode... */
179 if (acl == ACL_NOT_CACHED)
180 return -ECHILD;
181 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
182 }
183
184 acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
185
186 /*
187 * A filesystem can force an ACL callback by just never filling the
188 * ACL cache. But normally you'd fill the cache either at inode
189 * instantiation time, or on the first ->get_acl call.
190 *
191 * If the filesystem doesn't have a get_acl() function at all, we'll
192 * just create the negative cache entry.
193 */
194 if (acl == ACL_NOT_CACHED) {
195 if (inode->i_op->get_acl) {
196 acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS);
197 if (IS_ERR(acl))
198 return PTR_ERR(acl);
199 } else {
200 set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
201 return -EAGAIN;
202 }
203 }
204
205 if (acl) {
206 int error = posix_acl_permission(inode, acl, mask);
207 posix_acl_release(acl);
208 return error;
209 }
210 #endif
211
212 return -EAGAIN;
213 }
214
215 /*
216 * This does the basic permission checking
217 */
218 static int acl_permission_check(struct inode *inode, int mask)
219 {
220 unsigned int mode = inode->i_mode;
221
222 if (likely(uid_eq(current_fsuid(), inode->i_uid)))
223 mode >>= 6;
224 else {
225 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
226 int error = check_acl(inode, mask);
227 if (error != -EAGAIN)
228 return error;
229 }
230
231 if (in_group_p(inode->i_gid))
232 mode >>= 3;
233 }
234
235 /*
236 * If the DACs are ok we don't need any capability check.
237 */
238 if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
239 return 0;
240 return -EACCES;
241 }
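/*
 * Worked example of the arithmetic above (MAY_READ is 04, MAY_WRITE 02,
 * MAY_EXEC 01, mirroring the rwx bits): for a 0640 file and a caller who
 * is in the owning group but is not the owner, mode >>= 3 leaves 04, so a
 * MAY_READ request gives (04 & ~04 & 07) == 0 and succeeds, while
 * MAY_READ|MAY_WRITE gives (06 & ~04 & 07) == 02 and falls through to the
 * capability checks in generic_permission().
 */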
242
243 /**
244 * generic_permission - check for access rights on a Posix-like filesystem
245 * @inode: inode to check access rights for
246 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
247 *
248 * Used to check for read/write/execute permissions on a file.
249 * We use "fsuid" for this, letting us set arbitrary permissions
250 * for filesystem access without changing the "normal" uids which
251 * are used for other things.
252 *
253 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
254 * request cannot be satisfied (eg. requires blocking or too much complexity).
255 * It would then be called again in ref-walk mode.
256 */
257 int generic_permission(struct inode *inode, int mask)
258 {
259 int ret;
260
261 /*
262 * Do the basic permission checks.
263 */
264 ret = acl_permission_check(inode, mask);
265 if (ret != -EACCES)
266 return ret;
267
268 if (S_ISDIR(inode->i_mode)) {
269 /* DACs are overridable for directories */
270 if (inode_capable(inode, CAP_DAC_OVERRIDE))
271 return 0;
272 if (!(mask & MAY_WRITE))
273 if (inode_capable(inode, CAP_DAC_READ_SEARCH))
274 return 0;
275 return -EACCES;
276 }
277 /*
278 * Read/write DACs are always overridable.
279 * Executable DACs are overridable when there is
280 * at least one exec bit set.
281 */
282 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
283 if (inode_capable(inode, CAP_DAC_OVERRIDE))
284 return 0;
285
286 /*
287 * Searching includes executable on directories, else just read.
288 */
289 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
290 if (mask == MAY_READ)
291 if (inode_capable(inode, CAP_DAC_READ_SEARCH))
292 return 0;
293
294 return -EACCES;
295 }
296
297 /*
298 * We _really_ want to just do "generic_permission()" without
299 * even looking at the inode->i_op values. So we keep a cache
300 * flag in inode->i_opflags, that says "this has no special
301 * permission function, use the fast case".
302 */
303 static inline int do_inode_permission(struct inode *inode, int mask)
304 {
305 if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
306 if (likely(inode->i_op->permission))
307 return inode->i_op->permission(inode, mask);
308
309 /* This gets set once for the inode lifetime */
310 spin_lock(&inode->i_lock);
311 inode->i_opflags |= IOP_FASTPERM;
312 spin_unlock(&inode->i_lock);
313 }
314 return generic_permission(inode, mask);
315 }
316
317 /**
318 * inode_permission - check for access rights to a given inode
319 * @inode: inode to check permission on
320 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
321 *
322 * Used to check for read/write/execute permissions on an inode.
323 * We use "fsuid" for this, letting us set arbitrary permissions
324 * for filesystem access without changing the "normal" uids which
325 * are used for other things.
326 *
327 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
328 */
329 int inode_permission(struct inode *inode, int mask)
330 {
331 int retval;
332
333 if (unlikely(mask & MAY_WRITE)) {
334 umode_t mode = inode->i_mode;
335
336 /*
337 * Nobody gets write access to a read-only fs.
338 */
339 if (IS_RDONLY(inode) &&
340 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
341 return -EROFS;
342
343 /*
344 * Nobody gets write access to an immutable file.
345 */
346 if (IS_IMMUTABLE(inode))
347 return -EACCES;
348 }
349
350 retval = do_inode_permission(inode, mask);
351 if (retval)
352 return retval;
353
354 retval = devcgroup_inode_permission(inode, mask);
355 if (retval)
356 return retval;
357
358 return security_inode_permission(inode, mask);
359 }
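/*
 * A minimal sketch of a caller asking for append access; the wrapper is
 * hypothetical, only inode_permission() is real.  Per the note above,
 * MAY_APPEND never travels without MAY_WRITE.
 */
static inline int example_want_append(struct inode *inode)
{
	return inode_permission(inode, MAY_WRITE | MAY_APPEND);
}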
360
361 /**
362 * path_get - get a reference to a path
363 * @path: path to get the reference to
364 *
365 * Given a path increment the reference count to the dentry and the vfsmount.
366 */
367 void path_get(struct path *path)
368 {
369 mntget(path->mnt);
370 dget(path->dentry);
371 }
372 EXPORT_SYMBOL(path_get);
373
374 /**
375 * path_put - put a reference to a path
376 * @path: path to put the reference to
377 *
378 * Given a path decrement the reference count to the dentry and the vfsmount.
379 */
380 void path_put(struct path *path)
381 {
382 dput(path->dentry);
383 mntput(path->mnt);
384 }
385 EXPORT_SYMBOL(path_put);
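/*
 * A sketch of the usual pairing: whoever stores a copy of a struct path
 * takes a reference with path_get() and drops it with path_put() when the
 * copy goes away.  example_stash_path() is a hypothetical caller.
 */
static inline void example_stash_path(struct path *dst, const struct path *src)
{
	*dst = *src;
	path_get(dst);
	/* ... and later, when dst is discarded: path_put(dst); */
}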
386
387 /*
388 * Path walking has 2 modes, rcu-walk and ref-walk (see
389 * Documentation/filesystems/path-lookup.txt). In situations when we can't
390 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
391 * normal reference counts on dentries and vfsmounts to transition to ref-walk
392 * mode. Refcounts are grabbed at the last known good point before rcu-walk
393 * got stuck, so ref-walk may continue from there. If this is not successful
394 * (eg. a seqcount has changed), then failure is returned and it's up to caller
395 * to restart the path walk from the beginning in ref-walk mode.
396 */
397
398 /**
399 * unlazy_walk - try to switch to ref-walk mode.
400 * @nd: nameidata pathwalk data
401 * @dentry: child of nd->path.dentry or NULL
402 * Returns: 0 on success, -ECHILD on failure
403 *
404 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
405 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
406 * @nd or NULL. Must be called from rcu-walk context.
407 */
408 static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
409 {
410 struct fs_struct *fs = current->fs;
411 struct dentry *parent = nd->path.dentry;
412 int want_root = 0;
413
414 BUG_ON(!(nd->flags & LOOKUP_RCU));
415 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
416 want_root = 1;
417 spin_lock(&fs->lock);
418 if (nd->root.mnt != fs->root.mnt ||
419 nd->root.dentry != fs->root.dentry)
420 goto err_root;
421 }
422 spin_lock(&parent->d_lock);
423 if (!dentry) {
424 if (!__d_rcu_to_refcount(parent, nd->seq))
425 goto err_parent;
426 BUG_ON(nd->inode != parent->d_inode);
427 } else {
428 if (dentry->d_parent != parent)
429 goto err_parent;
430 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
431 if (!__d_rcu_to_refcount(dentry, nd->seq))
432 goto err_child;
433 /*
434 * If the sequence check on the child dentry passed, then
435 * the child has not been removed from its parent. This
436 * means the parent dentry must be valid and able to take
437 * a reference at this point.
438 */
439 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
440 BUG_ON(!parent->d_count);
441 parent->d_count++;
442 spin_unlock(&dentry->d_lock);
443 }
444 spin_unlock(&parent->d_lock);
445 if (want_root) {
446 path_get(&nd->root);
447 spin_unlock(&fs->lock);
448 }
449 mntget(nd->path.mnt);
450
451 rcu_read_unlock();
452 br_read_unlock(&vfsmount_lock);
453 nd->flags &= ~LOOKUP_RCU;
454 return 0;
455
456 err_child:
457 spin_unlock(&dentry->d_lock);
458 err_parent:
459 spin_unlock(&parent->d_lock);
460 err_root:
461 if (want_root)
462 spin_unlock(&fs->lock);
463 return -ECHILD;
464 }
465
466 /**
467 * release_open_intent - free up open intent resources
468 * @nd: pointer to nameidata
469 */
470 void release_open_intent(struct nameidata *nd)
471 {
472 struct file *file = nd->intent.open.file;
473
474 if (file && !IS_ERR(file)) {
475 if (file->f_path.dentry == NULL)
476 put_filp(file);
477 else
478 fput(file);
479 }
480 }
481
482 static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
483 {
484 return dentry->d_op->d_revalidate(dentry, nd);
485 }
486
487 /**
488 * complete_walk - successful completion of path walk
489 * @nd: pointer nameidata
490 *
491 * If we had been in RCU mode, drop out of it and legitimize nd->path.
492 * Revalidate the final result, unless we'd already done that during
493 * the path walk or the filesystem doesn't ask for it. Return 0 on
494 * success, -error on failure. In case of failure caller does not
495 * need to drop nd->path.
496 */
497 static int complete_walk(struct nameidata *nd)
498 {
499 struct dentry *dentry = nd->path.dentry;
500 int status;
501
502 if (nd->flags & LOOKUP_RCU) {
503 nd->flags &= ~LOOKUP_RCU;
504 if (!(nd->flags & LOOKUP_ROOT))
505 nd->root.mnt = NULL;
506 spin_lock(&dentry->d_lock);
507 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
508 spin_unlock(&dentry->d_lock);
509 rcu_read_unlock();
510 br_read_unlock(&vfsmount_lock);
511 return -ECHILD;
512 }
513 BUG_ON(nd->inode != dentry->d_inode);
514 spin_unlock(&dentry->d_lock);
515 mntget(nd->path.mnt);
516 rcu_read_unlock();
517 br_read_unlock(&vfsmount_lock);
518 }
519
520 if (likely(!(nd->flags & LOOKUP_JUMPED)))
521 return 0;
522
523 if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
524 return 0;
525
526 if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
527 return 0;
528
529 /* Note: we do not d_invalidate() */
530 status = d_revalidate(dentry, nd);
531 if (status > 0)
532 return 0;
533
534 if (!status)
535 status = -ESTALE;
536
537 path_put(&nd->path);
538 return status;
539 }
540
541 static __always_inline void set_root(struct nameidata *nd)
542 {
543 if (!nd->root.mnt)
544 get_fs_root(current->fs, &nd->root);
545 }
546
547 static int link_path_walk(const char *, struct nameidata *);
548
549 static __always_inline void set_root_rcu(struct nameidata *nd)
550 {
551 if (!nd->root.mnt) {
552 struct fs_struct *fs = current->fs;
553 unsigned seq;
554
555 do {
556 seq = read_seqcount_begin(&fs->seq);
557 nd->root = fs->root;
558 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
559 } while (read_seqcount_retry(&fs->seq, seq));
560 }
561 }
562
563 static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
564 {
565 int ret;
566
567 if (IS_ERR(link))
568 goto fail;
569
570 if (*link == '/') {
571 set_root(nd);
572 path_put(&nd->path);
573 nd->path = nd->root;
574 path_get(&nd->root);
575 nd->flags |= LOOKUP_JUMPED;
576 }
577 nd->inode = nd->path.dentry->d_inode;
578
579 ret = link_path_walk(link, nd);
580 return ret;
581 fail:
582 path_put(&nd->path);
583 return PTR_ERR(link);
584 }
585
586 static void path_put_conditional(struct path *path, struct nameidata *nd)
587 {
588 dput(path->dentry);
589 if (path->mnt != nd->path.mnt)
590 mntput(path->mnt);
591 }
592
593 static inline void path_to_nameidata(const struct path *path,
594 struct nameidata *nd)
595 {
596 if (!(nd->flags & LOOKUP_RCU)) {
597 dput(nd->path.dentry);
598 if (nd->path.mnt != path->mnt)
599 mntput(nd->path.mnt);
600 }
601 nd->path.mnt = path->mnt;
602 nd->path.dentry = path->dentry;
603 }
604
605 static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
606 {
607 struct inode *inode = link->dentry->d_inode;
608 if (inode->i_op->put_link)
609 inode->i_op->put_link(link->dentry, nd, cookie);
610 path_put(link);
611 }
612
613 static __always_inline int
614 follow_link(struct path *link, struct nameidata *nd, void **p)
615 {
616 struct dentry *dentry = link->dentry;
617 int error;
618 char *s;
619
620 BUG_ON(nd->flags & LOOKUP_RCU);
621
622 if (link->mnt == nd->path.mnt)
623 mntget(link->mnt);
624
625 error = -ELOOP;
626 if (unlikely(current->total_link_count >= 40))
627 goto out_put_nd_path;
628
629 cond_resched();
630 current->total_link_count++;
631
632 touch_atime(link);
633 nd_set_link(nd, NULL);
634
635 error = security_inode_follow_link(link->dentry, nd);
636 if (error)
637 goto out_put_nd_path;
638
639 nd->last_type = LAST_BIND;
640 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
641 error = PTR_ERR(*p);
642 if (IS_ERR(*p))
643 goto out_put_link;
644
645 error = 0;
646 s = nd_get_link(nd);
647 if (s) {
648 error = __vfs_follow_link(nd, s);
649 } else if (nd->last_type == LAST_BIND) {
650 nd->flags |= LOOKUP_JUMPED;
651 nd->inode = nd->path.dentry->d_inode;
652 if (nd->inode->i_op->follow_link) {
653 /* stepped on a _really_ weird one */
654 path_put(&nd->path);
655 error = -ELOOP;
656 }
657 }
658 if (unlikely(error))
659 put_link(nd, link, *p);
660
661 return error;
662
663 out_put_nd_path:
664 path_put(&nd->path);
665 out_put_link:
666 path_put(link);
667 return error;
668 }
669
670 static int follow_up_rcu(struct path *path)
671 {
672 struct mount *mnt = real_mount(path->mnt);
673 struct mount *parent;
674 struct dentry *mountpoint;
675
676 parent = mnt->mnt_parent;
677 if (&parent->mnt == path->mnt)
678 return 0;
679 mountpoint = mnt->mnt_mountpoint;
680 path->dentry = mountpoint;
681 path->mnt = &parent->mnt;
682 return 1;
683 }
684
685 int follow_up(struct path *path)
686 {
687 struct mount *mnt = real_mount(path->mnt);
688 struct mount *parent;
689 struct dentry *mountpoint;
690
691 br_read_lock(&vfsmount_lock);
692 parent = mnt->mnt_parent;
693 if (&parent->mnt == path->mnt) {
694 br_read_unlock(&vfsmount_lock);
695 return 0;
696 }
697 mntget(&parent->mnt);
698 mountpoint = dget(mnt->mnt_mountpoint);
699 br_read_unlock(&vfsmount_lock);
700 dput(path->dentry);
701 path->dentry = mountpoint;
702 mntput(path->mnt);
703 path->mnt = &parent->mnt;
704 return 1;
705 }
706
707 /*
708 * Perform an automount
709 * - return -EISDIR to tell follow_managed() to stop and return the path we
710 * were called with.
711 */
712 static int follow_automount(struct path *path, unsigned flags,
713 bool *need_mntput)
714 {
715 struct vfsmount *mnt;
716 int err;
717
718 if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
719 return -EREMOTE;
720
721 /* We don't want to mount if someone's just doing a stat -
722 * unless they're stat'ing a directory and appended a '/' to
723 * the name.
724 *
725 * We do, however, want to mount if someone wants to open or
726 * create a file of any type under the mountpoint, wants to
727 * traverse through the mountpoint or wants to open the
728 * mounted directory. Also, autofs may mark negative dentries
729 * as being automount points. These will need the attentions
730 * of the daemon to instantiate them before they can be used.
731 */
732 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
733 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
734 path->dentry->d_inode)
735 return -EISDIR;
736
737 current->total_link_count++;
738 if (current->total_link_count >= 40)
739 return -ELOOP;
740
741 mnt = path->dentry->d_op->d_automount(path);
742 if (IS_ERR(mnt)) {
743 /*
744 * The filesystem is allowed to return -EISDIR here to indicate
745 * it doesn't want to automount. For instance, autofs would do
746 * this so that its userspace daemon can mount on this dentry.
747 *
748 * However, we can only permit this if it's a terminal point in
749 * the path being looked up; if it wasn't then the remainder of
750 * the path is inaccessible and we should say so.
751 */
752 if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
753 return -EREMOTE;
754 return PTR_ERR(mnt);
755 }
756
757 if (!mnt) /* mount collision */
758 return 0;
759
760 if (!*need_mntput) {
761 /* lock_mount() may release path->mnt on error */
762 mntget(path->mnt);
763 *need_mntput = true;
764 }
765 err = finish_automount(mnt, path);
766
767 switch (err) {
768 case -EBUSY:
769 /* Someone else made a mount here whilst we were busy */
770 return 0;
771 case 0:
772 path_put(path);
773 path->mnt = mnt;
774 path->dentry = dget(mnt->mnt_root);
775 return 0;
776 default:
777 return err;
778 }
779
780 }
781
782 /*
783 * Handle a dentry that is managed in some way.
784 * - Flagged for transit management (autofs)
785 * - Flagged as mountpoint
786 * - Flagged as automount point
787 *
788 * This may only be called in refwalk mode.
789 *
790 * Serialization is taken care of in namespace.c
791 */
792 static int follow_managed(struct path *path, unsigned flags)
793 {
794 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
795 unsigned managed;
796 bool need_mntput = false;
797 int ret = 0;
798
799 /* Given that we're not holding a lock here, we retain the value in a
800 * local variable for each dentry as we look at it so that we don't see
801 * the components of that value change under us */
802 while (managed = ACCESS_ONCE(path->dentry->d_flags),
803 managed &= DCACHE_MANAGED_DENTRY,
804 unlikely(managed != 0)) {
805 /* Allow the filesystem to manage the transit without i_mutex
806 * being held. */
807 if (managed & DCACHE_MANAGE_TRANSIT) {
808 BUG_ON(!path->dentry->d_op);
809 BUG_ON(!path->dentry->d_op->d_manage);
810 ret = path->dentry->d_op->d_manage(path->dentry, false);
811 if (ret < 0)
812 break;
813 }
814
815 /* Transit to a mounted filesystem. */
816 if (managed & DCACHE_MOUNTED) {
817 struct vfsmount *mounted = lookup_mnt(path);
818 if (mounted) {
819 dput(path->dentry);
820 if (need_mntput)
821 mntput(path->mnt);
822 path->mnt = mounted;
823 path->dentry = dget(mounted->mnt_root);
824 need_mntput = true;
825 continue;
826 }
827
828 /* Something is mounted on this dentry in another
829 * namespace and/or whatever was mounted there in this
830 * namespace got unmounted before we managed to get the
831 * vfsmount_lock */
832 }
833
834 /* Handle an automount point */
835 if (managed & DCACHE_NEED_AUTOMOUNT) {
836 ret = follow_automount(path, flags, &need_mntput);
837 if (ret < 0)
838 break;
839 continue;
840 }
841
842 /* We didn't change the current path point */
843 break;
844 }
845
846 if (need_mntput && path->mnt == mnt)
847 mntput(path->mnt);
848 if (ret == -EISDIR)
849 ret = 0;
850 return ret < 0 ? ret : need_mntput;
851 }
852
853 int follow_down_one(struct path *path)
854 {
855 struct vfsmount *mounted;
856
857 mounted = lookup_mnt(path);
858 if (mounted) {
859 dput(path->dentry);
860 mntput(path->mnt);
861 path->mnt = mounted;
862 path->dentry = dget(mounted->mnt_root);
863 return 1;
864 }
865 return 0;
866 }
867
868 static inline bool managed_dentry_might_block(struct dentry *dentry)
869 {
870 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
871 dentry->d_op->d_manage(dentry, true) < 0);
872 }
873
874 /*
875 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
876 * we meet a managed dentry that would need blocking.
877 */
878 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
879 struct inode **inode)
880 {
881 for (;;) {
882 struct mount *mounted;
883 /*
884 * Don't forget we might have a non-mountpoint managed dentry
885 * that wants to block transit.
886 */
887 if (unlikely(managed_dentry_might_block(path->dentry)))
888 return false;
889
890 if (!d_mountpoint(path->dentry))
891 break;
892
893 mounted = __lookup_mnt(path->mnt, path->dentry, 1);
894 if (!mounted)
895 break;
896 path->mnt = &mounted->mnt;
897 path->dentry = mounted->mnt.mnt_root;
898 nd->flags |= LOOKUP_JUMPED;
899 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
900 /*
901 * Update the inode too. We don't need to re-check the
902 * dentry sequence number here after this d_inode read,
903 * because a mount-point is always pinned.
904 */
905 *inode = path->dentry->d_inode;
906 }
907 return true;
908 }
909
910 static void follow_mount_rcu(struct nameidata *nd)
911 {
912 while (d_mountpoint(nd->path.dentry)) {
913 struct mount *mounted;
914 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
915 if (!mounted)
916 break;
917 nd->path.mnt = &mounted->mnt;
918 nd->path.dentry = mounted->mnt.mnt_root;
919 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
920 }
921 }
922
923 static int follow_dotdot_rcu(struct nameidata *nd)
924 {
925 set_root_rcu(nd);
926
927 while (1) {
928 if (nd->path.dentry == nd->root.dentry &&
929 nd->path.mnt == nd->root.mnt) {
930 break;
931 }
932 if (nd->path.dentry != nd->path.mnt->mnt_root) {
933 struct dentry *old = nd->path.dentry;
934 struct dentry *parent = old->d_parent;
935 unsigned seq;
936
937 seq = read_seqcount_begin(&parent->d_seq);
938 if (read_seqcount_retry(&old->d_seq, nd->seq))
939 goto failed;
940 nd->path.dentry = parent;
941 nd->seq = seq;
942 break;
943 }
944 if (!follow_up_rcu(&nd->path))
945 break;
946 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
947 }
948 follow_mount_rcu(nd);
949 nd->inode = nd->path.dentry->d_inode;
950 return 0;
951
952 failed:
953 nd->flags &= ~LOOKUP_RCU;
954 if (!(nd->flags & LOOKUP_ROOT))
955 nd->root.mnt = NULL;
956 rcu_read_unlock();
957 br_read_unlock(&vfsmount_lock);
958 return -ECHILD;
959 }
960
961 /*
962 * Follow down to the covering mount currently visible to userspace. At each
963 * point, the filesystem owning that dentry may be queried as to whether the
964 * caller is permitted to proceed or not.
965 */
966 int follow_down(struct path *path)
967 {
968 unsigned managed;
969 int ret;
970
971 while (managed = ACCESS_ONCE(path->dentry->d_flags),
972 unlikely(managed & DCACHE_MANAGED_DENTRY)) {
973 /* Allow the filesystem to manage the transit without i_mutex
974 * being held.
975 *
976 * We indicate to the filesystem if someone is trying to mount
977 * something here. This gives autofs the chance to deny anyone
978 * other than its daemon the right to mount on its
979 * superstructure.
980 *
981 * The filesystem may sleep at this point.
982 */
983 if (managed & DCACHE_MANAGE_TRANSIT) {
984 BUG_ON(!path->dentry->d_op);
985 BUG_ON(!path->dentry->d_op->d_manage);
986 ret = path->dentry->d_op->d_manage(
987 path->dentry, false);
988 if (ret < 0)
989 return ret == -EISDIR ? 0 : ret;
990 }
991
992 /* Transit to a mounted filesystem. */
993 if (managed & DCACHE_MOUNTED) {
994 struct vfsmount *mounted = lookup_mnt(path);
995 if (!mounted)
996 break;
997 dput(path->dentry);
998 mntput(path->mnt);
999 path->mnt = mounted;
1000 path->dentry = dget(mounted->mnt_root);
1001 continue;
1002 }
1003
1004 /* Don't handle automount points here */
1005 break;
1006 }
1007 return 0;
1008 }
1009
1010 /*
1011 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
1012 */
1013 static void follow_mount(struct path *path)
1014 {
1015 while (d_mountpoint(path->dentry)) {
1016 struct vfsmount *mounted = lookup_mnt(path);
1017 if (!mounted)
1018 break;
1019 dput(path->dentry);
1020 mntput(path->mnt);
1021 path->mnt = mounted;
1022 path->dentry = dget(mounted->mnt_root);
1023 }
1024 }
1025
1026 static void follow_dotdot(struct nameidata *nd)
1027 {
1028 set_root(nd);
1029
1030 while(1) {
1031 struct dentry *old = nd->path.dentry;
1032
1033 if (nd->path.dentry == nd->root.dentry &&
1034 nd->path.mnt == nd->root.mnt) {
1035 break;
1036 }
1037 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1038 /* rare case of legitimate dget_parent()... */
1039 nd->path.dentry = dget_parent(nd->path.dentry);
1040 dput(old);
1041 break;
1042 }
1043 if (!follow_up(&nd->path))
1044 break;
1045 }
1046 follow_mount(&nd->path);
1047 nd->inode = nd->path.dentry->d_inode;
1048 }
1049
1050 /*
1051 * This looks up the name in dcache, possibly revalidates the old dentry and
1052 * allocates a new one if not found or not valid. In the need_lookup argument
1053 * returns whether i_op->lookup is necessary.
1054 *
1055 * dir->d_inode->i_mutex must be held
1056 */
1057 static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
1058 struct nameidata *nd, bool *need_lookup)
1059 {
1060 struct dentry *dentry;
1061 int error;
1062
1063 *need_lookup = false;
1064 dentry = d_lookup(dir, name);
1065 if (dentry) {
1066 if (d_need_lookup(dentry)) {
1067 *need_lookup = true;
1068 } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
1069 error = d_revalidate(dentry, nd);
1070 if (unlikely(error <= 0)) {
1071 if (error < 0) {
1072 dput(dentry);
1073 return ERR_PTR(error);
1074 } else if (!d_invalidate(dentry)) {
1075 dput(dentry);
1076 dentry = NULL;
1077 }
1078 }
1079 }
1080 }
1081
1082 if (!dentry) {
1083 dentry = d_alloc(dir, name);
1084 if (unlikely(!dentry))
1085 return ERR_PTR(-ENOMEM);
1086
1087 *need_lookup = true;
1088 }
1089 return dentry;
1090 }
1091
1092 /*
1093 * Call i_op->lookup on the dentry. The dentry must be negative but may be
1094 * hashed if it was populated with DCACHE_NEED_LOOKUP.
1095 *
1096 * dir->d_inode->i_mutex must be held
1097 */
1098 static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1099 struct nameidata *nd)
1100 {
1101 struct dentry *old;
1102
1103 /* Don't create child dentry for a dead directory. */
1104 if (unlikely(IS_DEADDIR(dir))) {
1105 dput(dentry);
1106 return ERR_PTR(-ENOENT);
1107 }
1108
1109 old = dir->i_op->lookup(dir, dentry, nd);
1110 if (unlikely(old)) {
1111 dput(dentry);
1112 dentry = old;
1113 }
1114 return dentry;
1115 }
1116
1117 static struct dentry *__lookup_hash(struct qstr *name,
1118 struct dentry *base, struct nameidata *nd)
1119 {
1120 bool need_lookup;
1121 struct dentry *dentry;
1122
1123 dentry = lookup_dcache(name, base, nd, &need_lookup);
1124 if (!need_lookup)
1125 return dentry;
1126
1127 return lookup_real(base->d_inode, dentry, nd);
1128 }
1129
1130 /*
1131 * It's more convoluted than I'd like it to be, but... it's still fairly
1132 * small and for now I'd prefer to have fast path as straight as possible.
1133 * It _is_ time-critical.
1134 */
1135 static int lookup_fast(struct nameidata *nd, struct qstr *name,
1136 struct path *path, struct inode **inode)
1137 {
1138 struct vfsmount *mnt = nd->path.mnt;
1139 struct dentry *dentry, *parent = nd->path.dentry;
1140 int need_reval = 1;
1141 int status = 1;
1142 int err;
1143
1144 /*
1145 * Rename seqlock is not required here because in the off chance
1146 * of a false negative due to a concurrent rename, we're going to
1147 * do the non-racy lookup, below.
1148 */
1149 if (nd->flags & LOOKUP_RCU) {
1150 unsigned seq;
1151 dentry = __d_lookup_rcu(parent, name, &seq, nd->inode);
1152 if (!dentry)
1153 goto unlazy;
1154
1155 /*
1156 * This sequence count validates that the inode matches
1157 * the dentry name information from lookup.
1158 */
1159 *inode = dentry->d_inode;
1160 if (read_seqcount_retry(&dentry->d_seq, seq))
1161 return -ECHILD;
1162
1163 /*
1164 * This sequence count validates that the parent had no
1165 * changes while we did the lookup of the dentry above.
1166 *
1167 * The memory barrier in read_seqcount_begin of child is
1168 * enough, we can use __read_seqcount_retry here.
1169 */
1170 if (__read_seqcount_retry(&parent->d_seq, nd->seq))
1171 return -ECHILD;
1172 nd->seq = seq;
1173
1174 if (unlikely(d_need_lookup(dentry)))
1175 goto unlazy;
1176 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1177 status = d_revalidate(dentry, nd);
1178 if (unlikely(status <= 0)) {
1179 if (status != -ECHILD)
1180 need_reval = 0;
1181 goto unlazy;
1182 }
1183 }
1184 path->mnt = mnt;
1185 path->dentry = dentry;
1186 if (unlikely(!__follow_mount_rcu(nd, path, inode)))
1187 goto unlazy;
1188 if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
1189 goto unlazy;
1190 return 0;
1191 unlazy:
1192 if (unlazy_walk(nd, dentry))
1193 return -ECHILD;
1194 } else {
1195 dentry = __d_lookup(parent, name);
1196 }
1197
1198 if (unlikely(!dentry))
1199 goto need_lookup;
1200
1201 if (unlikely(d_need_lookup(dentry))) {
1202 dput(dentry);
1203 goto need_lookup;
1204 }
1205
1206 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
1207 status = d_revalidate(dentry, nd);
1208 if (unlikely(status <= 0)) {
1209 if (status < 0) {
1210 dput(dentry);
1211 return status;
1212 }
1213 if (!d_invalidate(dentry)) {
1214 dput(dentry);
1215 goto need_lookup;
1216 }
1217 }
1218
1219 path->mnt = mnt;
1220 path->dentry = dentry;
1221 err = follow_managed(path, nd->flags);
1222 if (unlikely(err < 0)) {
1223 path_put_conditional(path, nd);
1224 return err;
1225 }
1226 if (err)
1227 nd->flags |= LOOKUP_JUMPED;
1228 *inode = path->dentry->d_inode;
1229 return 0;
1230
1231 need_lookup:
1232 return 1;
1233 }
1234
1235 /* Fast lookup failed, do it the slow way */
1236 static int lookup_slow(struct nameidata *nd, struct qstr *name,
1237 struct path *path)
1238 {
1239 struct dentry *dentry, *parent;
1240 int err;
1241
1242 parent = nd->path.dentry;
1243 BUG_ON(nd->inode != parent->d_inode);
1244
1245 mutex_lock(&parent->d_inode->i_mutex);
1246 dentry = __lookup_hash(name, parent, nd);
1247 mutex_unlock(&parent->d_inode->i_mutex);
1248 if (IS_ERR(dentry))
1249 return PTR_ERR(dentry);
1250 path->mnt = nd->path.mnt;
1251 path->dentry = dentry;
1252 err = follow_managed(path, nd->flags);
1253 if (unlikely(err < 0)) {
1254 path_put_conditional(path, nd);
1255 return err;
1256 }
1257 if (err)
1258 nd->flags |= LOOKUP_JUMPED;
1259 return 0;
1260 }
1261
1262 static inline int may_lookup(struct nameidata *nd)
1263 {
1264 if (nd->flags & LOOKUP_RCU) {
1265 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1266 if (err != -ECHILD)
1267 return err;
1268 if (unlazy_walk(nd, NULL))
1269 return -ECHILD;
1270 }
1271 return inode_permission(nd->inode, MAY_EXEC);
1272 }
1273
1274 static inline int handle_dots(struct nameidata *nd, int type)
1275 {
1276 if (type == LAST_DOTDOT) {
1277 if (nd->flags & LOOKUP_RCU) {
1278 if (follow_dotdot_rcu(nd))
1279 return -ECHILD;
1280 } else
1281 follow_dotdot(nd);
1282 }
1283 return 0;
1284 }
1285
1286 static void terminate_walk(struct nameidata *nd)
1287 {
1288 if (!(nd->flags & LOOKUP_RCU)) {
1289 path_put(&nd->path);
1290 } else {
1291 nd->flags &= ~LOOKUP_RCU;
1292 if (!(nd->flags & LOOKUP_ROOT))
1293 nd->root.mnt = NULL;
1294 rcu_read_unlock();
1295 br_read_unlock(&vfsmount_lock);
1296 }
1297 }
1298
1299 /*
1300 * Do we need to follow links? We _really_ want to be able
1301 * to do this check without having to look at inode->i_op,
1302 * so we keep a cache of "no, this doesn't need follow_link"
1303 * for the common case.
1304 */
1305 static inline int should_follow_link(struct inode *inode, int follow)
1306 {
1307 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1308 if (likely(inode->i_op->follow_link))
1309 return follow;
1310
1311 /* This gets set once for the inode lifetime */
1312 spin_lock(&inode->i_lock);
1313 inode->i_opflags |= IOP_NOFOLLOW;
1314 spin_unlock(&inode->i_lock);
1315 }
1316 return 0;
1317 }
1318
1319 static inline int walk_component(struct nameidata *nd, struct path *path,
1320 struct qstr *name, int type, int follow)
1321 {
1322 struct inode *inode;
1323 int err;
1324 /*
1325 * "." and ".." are special - ".." especially so because it has
1326 * to be able to know about the current root directory and
1327 * parent relationships.
1328 */
1329 if (unlikely(type != LAST_NORM))
1330 return handle_dots(nd, type);
1331 err = lookup_fast(nd, name, path, &inode);
1332 if (unlikely(err)) {
1333 if (err < 0)
1334 goto out_err;
1335
1336 err = lookup_slow(nd, name, path);
1337 if (err < 0)
1338 goto out_err;
1339
1340 inode = path->dentry->d_inode;
1341 }
1342 err = -ENOENT;
1343 if (!inode)
1344 goto out_path_put;
1345
1346 if (should_follow_link(inode, follow)) {
1347 if (nd->flags & LOOKUP_RCU) {
1348 if (unlikely(unlazy_walk(nd, path->dentry))) {
1349 err = -ECHILD;
1350 goto out_err;
1351 }
1352 }
1353 BUG_ON(inode != path->dentry->d_inode);
1354 return 1;
1355 }
1356 path_to_nameidata(path, nd);
1357 nd->inode = inode;
1358 return 0;
1359
1360 out_path_put:
1361 path_to_nameidata(path, nd);
1362 out_err:
1363 terminate_walk(nd);
1364 return err;
1365 }
1366
1367 /*
1368 * This limits recursive symlink follows to 8, while
1369 * limiting consecutive symlinks to 40.
1370 *
1371 * Without that kind of total limit, nasty chains of consecutive
1372 * symlinks can cause almost arbitrarily long lookups.
1373 */
1374 static inline int nested_symlink(struct path *path, struct nameidata *nd)
1375 {
1376 int res;
1377
1378 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
1379 path_put_conditional(path, nd);
1380 path_put(&nd->path);
1381 return -ELOOP;
1382 }
1383 BUG_ON(nd->depth >= MAX_NESTED_LINKS);
1384
1385 nd->depth++;
1386 current->link_count++;
1387
1388 do {
1389 struct path link = *path;
1390 void *cookie;
1391
1392 res = follow_link(&link, nd, &cookie);
1393 if (res)
1394 break;
1395 res = walk_component(nd, path, &nd->last,
1396 nd->last_type, LOOKUP_FOLLOW);
1397 put_link(nd, &link, cookie);
1398 } while (res > 0);
1399
1400 current->link_count--;
1401 nd->depth--;
1402 return res;
1403 }
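/*
 * The 40-link ceiling enforced in follow_link() above is easy to see from
 * userspace; a standalone sketch (the names "c0".."c41" are placeholders):
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char cur[16], prev[16];
 *		int i, fd;
 *
 *		close(open("c0", O_CREAT | O_WRONLY, 0644));	// real file
 *		for (i = 1; i <= 41; i++) {
 *			snprintf(prev, sizeof(prev), "c%d", i - 1);
 *			snprintf(cur, sizeof(cur), "c%d", i);
 *			symlink(prev, cur);
 *		}
 *		fd = open("c41", O_RDONLY);	// 41 consecutive links
 *		if (fd < 0)
 *			printf("open(c41): %s\n", strerror(errno));	// ELOOP
 *		return 0;
 *	}
 */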
1404
1405 /*
1406 * We really don't want to look at inode->i_op->lookup
1407 * when we don't have to. So we keep a cache bit in
1408 * the inode ->i_opflags field that says "yes, we can
1409 * do lookup on this inode".
1410 */
1411 static inline int can_lookup(struct inode *inode)
1412 {
1413 if (likely(inode->i_opflags & IOP_LOOKUP))
1414 return 1;
1415 if (likely(!inode->i_op->lookup))
1416 return 0;
1417
1418 /* We do this once for the lifetime of the inode */
1419 spin_lock(&inode->i_lock);
1420 inode->i_opflags |= IOP_LOOKUP;
1421 spin_unlock(&inode->i_lock);
1422 return 1;
1423 }
1424
1425 /*
1426 * We can do the critical dentry name comparison and hashing
1427 * operations one word at a time, but we are limited to:
1428 *
1429 * - Architectures with fast unaligned word accesses. We could
1430 * do a "get_unaligned()" if this helps and is sufficiently
1431 * fast.
1432 *
1433 * - Little-endian machines (so that we can generate the mask
1434 * of low bytes efficiently). Again, we *could* do a byte
1435 * swapping load on big-endian architectures if that is not
1436 * expensive enough to make the optimization worthless.
1437 *
1438 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
1439 * do not trap on the (extremely unlikely) case of a page
1440 * crossing operation).
1441 *
1442 * - Furthermore, we need an efficient 64-bit compile for the
1443 * 64-bit case in order to generate the "number of bytes in
1444 * the final mask". Again, that could be replaced with an
1445 * efficient population count instruction or similar.
1446 */
1447 #ifdef CONFIG_DCACHE_WORD_ACCESS
1448
1449 #include <asm/word-at-a-time.h>
1450
1451 #ifdef CONFIG_64BIT
1452
1453 static inline unsigned int fold_hash(unsigned long hash)
1454 {
1455 hash += hash >> (8*sizeof(int));
1456 return hash;
1457 }
1458
1459 #else /* 32-bit case */
1460
1461 #define fold_hash(x) (x)
1462
1463 #endif
1464
1465 unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1466 {
1467 unsigned long a, mask;
1468 unsigned long hash = 0;
1469
1470 for (;;) {
1471 a = load_unaligned_zeropad(name);
1472 if (len < sizeof(unsigned long))
1473 break;
1474 hash += a;
1475 hash *= 9;
1476 name += sizeof(unsigned long);
1477 len -= sizeof(unsigned long);
1478 if (!len)
1479 goto done;
1480 }
1481 mask = ~(~0ul << len*8);
1482 hash += mask & a;
1483 done:
1484 return fold_hash(hash);
1485 }
1486 EXPORT_SYMBOL(full_name_hash);
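/*
 * Worked example of the tail handling above, on 64-bit: with 3 bytes left,
 * mask = ~(~0ul << 24) = 0x0000000000ffffff, so "hash += mask & a" folds in
 * only the 3 valid bytes of the final word fetched by
 * load_unaligned_zeropad() and ignores whatever followed the name.
 */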
1487
1488 /*
1489 * Calculate the length and hash of the path component, and
1490 * return the length of the component.
1491 */
1492 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1493 {
1494 unsigned long a, b, adata, bdata, mask, hash, len;
1495 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1496
1497 hash = a = 0;
1498 len = -sizeof(unsigned long);
1499 do {
1500 hash = (hash + a) * 9;
1501 len += sizeof(unsigned long);
1502 a = load_unaligned_zeropad(name+len);
1503 b = a ^ REPEAT_BYTE('/');
1504 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
1505
1506 adata = prep_zero_mask(a, adata, &constants);
1507 bdata = prep_zero_mask(b, bdata, &constants);
1508
1509 mask = create_zero_mask(adata | bdata);
1510
1511 hash += a & zero_bytemask(mask);
1512 *hashp = fold_hash(hash);
1513
1514 return len + find_zero(mask);
1515 }
1516
1517 #else
1518
1519 unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1520 {
1521 unsigned long hash = init_name_hash();
1522 while (len--)
1523 hash = partial_name_hash(*name++, hash);
1524 return end_name_hash(hash);
1525 }
1526 EXPORT_SYMBOL(full_name_hash);
1527
1528 /*
1529 * We know there's a real path component here of at least
1530 * one character.
1531 */
1532 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1533 {
1534 unsigned long hash = init_name_hash();
1535 unsigned long len = 0, c;
1536
1537 c = (unsigned char)*name;
1538 do {
1539 len++;
1540 hash = partial_name_hash(c, hash);
1541 c = (unsigned char)name[len];
1542 } while (c && c != '/');
1543 *hashp = end_name_hash(hash);
1544 return len;
1545 }
1546
1547 #endif
1548
1549 /*
1550 * Name resolution.
1551 * This is the basic name resolution function, turning a pathname into
1552 * the final dentry. We expect 'base' to be positive and a directory.
1553 *
1554 * Returns 0 and nd will have valid dentry and mnt on success.
1555 * Returns error and drops reference to input namei data on failure.
1556 */
1557 static int link_path_walk(const char *name, struct nameidata *nd)
1558 {
1559 struct path next;
1560 int err;
1561
1562 while (*name=='/')
1563 name++;
1564 if (!*name)
1565 return 0;
1566
1567 /* At this point we know we have a real path component. */
1568 for(;;) {
1569 struct qstr this;
1570 long len;
1571 int type;
1572
1573 err = may_lookup(nd);
1574 if (err)
1575 break;
1576
1577 len = hash_name(name, &this.hash);
1578 this.name = name;
1579 this.len = len;
1580
1581 type = LAST_NORM;
1582 if (name[0] == '.') switch (len) {
1583 case 2:
1584 if (name[1] == '.') {
1585 type = LAST_DOTDOT;
1586 nd->flags |= LOOKUP_JUMPED;
1587 }
1588 break;
1589 case 1:
1590 type = LAST_DOT;
1591 }
1592 if (likely(type == LAST_NORM)) {
1593 struct dentry *parent = nd->path.dentry;
1594 nd->flags &= ~LOOKUP_JUMPED;
1595 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
1596 err = parent->d_op->d_hash(parent, nd->inode,
1597 &this);
1598 if (err < 0)
1599 break;
1600 }
1601 }
1602
1603 if (!name[len])
1604 goto last_component;
1605 /*
1606 * If it wasn't NUL, we know it was '/'. Skip that
1607 * slash, and continue until no more slashes.
1608 */
1609 do {
1610 len++;
1611 } while (unlikely(name[len] == '/'));
1612 if (!name[len])
1613 goto last_component;
1614 name += len;
1615
1616 err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
1617 if (err < 0)
1618 return err;
1619
1620 if (err) {
1621 err = nested_symlink(&next, nd);
1622 if (err)
1623 return err;
1624 }
1625 if (can_lookup(nd->inode))
1626 continue;
1627 err = -ENOTDIR;
1628 break;
1629 /* here ends the main loop */
1630
1631 last_component:
1632 nd->last = this;
1633 nd->last_type = type;
1634 return 0;
1635 }
1636 terminate_walk(nd);
1637 return err;
1638 }
1639
1640 static int path_init(int dfd, const char *name, unsigned int flags,
1641 struct nameidata *nd, struct file **fp)
1642 {
1643 int retval = 0;
1644 int fput_needed;
1645 struct file *file;
1646
1647 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1648 nd->flags = flags | LOOKUP_JUMPED;
1649 nd->depth = 0;
1650 if (flags & LOOKUP_ROOT) {
1651 struct inode *inode = nd->root.dentry->d_inode;
1652 if (*name) {
1653 if (!inode->i_op->lookup)
1654 return -ENOTDIR;
1655 retval = inode_permission(inode, MAY_EXEC);
1656 if (retval)
1657 return retval;
1658 }
1659 nd->path = nd->root;
1660 nd->inode = inode;
1661 if (flags & LOOKUP_RCU) {
1662 br_read_lock(&vfsmount_lock);
1663 rcu_read_lock();
1664 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1665 } else {
1666 path_get(&nd->path);
1667 }
1668 return 0;
1669 }
1670
1671 nd->root.mnt = NULL;
1672
1673 if (*name=='/') {
1674 if (flags & LOOKUP_RCU) {
1675 br_read_lock(&vfsmount_lock);
1676 rcu_read_lock();
1677 set_root_rcu(nd);
1678 } else {
1679 set_root(nd);
1680 path_get(&nd->root);
1681 }
1682 nd->path = nd->root;
1683 } else if (dfd == AT_FDCWD) {
1684 if (flags & LOOKUP_RCU) {
1685 struct fs_struct *fs = current->fs;
1686 unsigned seq;
1687
1688 br_read_lock(&vfsmount_lock);
1689 rcu_read_lock();
1690
1691 do {
1692 seq = read_seqcount_begin(&fs->seq);
1693 nd->path = fs->pwd;
1694 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1695 } while (read_seqcount_retry(&fs->seq, seq));
1696 } else {
1697 get_fs_pwd(current->fs, &nd->path);
1698 }
1699 } else {
1700 struct dentry *dentry;
1701
1702 file = fget_raw_light(dfd, &fput_needed);
1703 retval = -EBADF;
1704 if (!file)
1705 goto out_fail;
1706
1707 dentry = file->f_path.dentry;
1708
1709 if (*name) {
1710 retval = -ENOTDIR;
1711 if (!S_ISDIR(dentry->d_inode->i_mode))
1712 goto fput_fail;
1713
1714 retval = inode_permission(dentry->d_inode, MAY_EXEC);
1715 if (retval)
1716 goto fput_fail;
1717 }
1718
1719 nd->path = file->f_path;
1720 if (flags & LOOKUP_RCU) {
1721 if (fput_needed)
1722 *fp = file;
1723 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1724 br_read_lock(&vfsmount_lock);
1725 rcu_read_lock();
1726 } else {
1727 path_get(&file->f_path);
1728 fput_light(file, fput_needed);
1729 }
1730 }
1731
1732 nd->inode = nd->path.dentry->d_inode;
1733 return 0;
1734
1735 fput_fail:
1736 fput_light(file, fput_needed);
1737 out_fail:
1738 return retval;
1739 }
1740
1741 static inline int lookup_last(struct nameidata *nd, struct path *path)
1742 {
1743 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
1744 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
1745
1746 nd->flags &= ~LOOKUP_PARENT;
1747 return walk_component(nd, path, &nd->last, nd->last_type,
1748 nd->flags & LOOKUP_FOLLOW);
1749 }
1750
1751 /* Returns 0 and nd will be valid on success; returns error otherwise. */
1752 static int path_lookupat(int dfd, const char *name,
1753 unsigned int flags, struct nameidata *nd)
1754 {
1755 struct file *base = NULL;
1756 struct path path;
1757 int err;
1758
1759 /*
1760 * Path walking is largely split up into 2 different synchronisation
1761 * schemes, rcu-walk and ref-walk (explained in
1762 * Documentation/filesystems/path-lookup.txt). These share much of the
1763 * path walk code, but some things particularly setup, cleanup, and
1764 * following mounts are sufficiently divergent that functions are
1765 * duplicated. Typically there is a function foo(), and its RCU
1766 * analogue, foo_rcu().
1767 *
1768 * -ECHILD is the error number of choice (just to avoid clashes) that
1769 * is returned if some aspect of an rcu-walk fails. Such an error must
1770 * be handled by restarting a traditional ref-walk (which will always
1771 * be able to complete).
1772 */
1773 err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
1774
1775 if (unlikely(err))
1776 return err;
1777
1778 current->total_link_count = 0;
1779 err = link_path_walk(name, nd);
1780
1781 if (!err && !(flags & LOOKUP_PARENT)) {
1782 err = lookup_last(nd, &path);
1783 while (err > 0) {
1784 void *cookie;
1785 struct path link = path;
1786 nd->flags |= LOOKUP_PARENT;
1787 err = follow_link(&link, nd, &cookie);
1788 if (err)
1789 break;
1790 err = lookup_last(nd, &path);
1791 put_link(nd, &link, cookie);
1792 }
1793 }
1794
1795 if (!err)
1796 err = complete_walk(nd);
1797
1798 if (!err && nd->flags & LOOKUP_DIRECTORY) {
1799 if (!nd->inode->i_op->lookup) {
1800 path_put(&nd->path);
1801 err = -ENOTDIR;
1802 }
1803 }
1804
1805 if (base)
1806 fput(base);
1807
1808 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
1809 path_put(&nd->root);
1810 nd->root.mnt = NULL;
1811 }
1812 return err;
1813 }
1814
1815 static int do_path_lookup(int dfd, const char *name,
1816 unsigned int flags, struct nameidata *nd)
1817 {
1818 int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
1819 if (unlikely(retval == -ECHILD))
1820 retval = path_lookupat(dfd, name, flags, nd);
1821 if (unlikely(retval == -ESTALE))
1822 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
1823
1824 if (likely(!retval)) {
1825 if (unlikely(!audit_dummy_context())) {
1826 if (nd->path.dentry && nd->inode)
1827 audit_inode(name, nd->path.dentry);
1828 }
1829 }
1830 return retval;
1831 }
1832
1833 int kern_path_parent(const char *name, struct nameidata *nd)
1834 {
1835 return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
1836 }
1837
1838 int kern_path(const char *name, unsigned int flags, struct path *path)
1839 {
1840 struct nameidata nd;
1841 int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
1842 if (!res)
1843 *path = nd.path;
1844 return res;
1845 }
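/*
 * A minimal sketch of typical kern_path() use, with a hypothetical wrapper:
 * resolve a pathname, peek at the result, then drop the references.
 */
static int example_resolve(const char *pathname)
{
	struct path p;
	int err = kern_path(pathname, LOOKUP_FOLLOW, &p);

	if (err)
		return err;
	pr_info("%s -> inode %lu\n", pathname, p.dentry->d_inode->i_ino);
	path_put(&p);
	return 0;
}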
1846
1847 /**
1848 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
1849 * @dentry: pointer to dentry of the base directory
1850 * @mnt: pointer to vfs mount of the base directory
1851 * @name: pointer to file name
1852 * @flags: lookup flags
1853 * @path: pointer to struct path to fill
1854 */
1855 int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
1856 const char *name, unsigned int flags,
1857 struct path *path)
1858 {
1859 struct nameidata nd;
1860 int err;
1861 nd.root.dentry = dentry;
1862 nd.root.mnt = mnt;
1863 BUG_ON(flags & LOOKUP_PARENT);
1864 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
1865 err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
1866 if (!err)
1867 *path = nd.path;
1868 return err;
1869 }
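/*
 * A sketch of a caller resolving a name beneath an already known
 * dentry/vfsmount pair, as an in-kernel file server might; the wrapper
 * and the "export/data" pathname are illustrative only.  On success the
 * caller owns the references in *result and must path_put() them.
 */
static int example_lookup_below(struct dentry *root, struct vfsmount *mnt,
				struct path *result)
{
	return vfs_path_lookup(root, mnt, "export/data", LOOKUP_FOLLOW, result);
}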
1870
1871 /*
1872 * Restricted form of lookup. Doesn't follow links, single-component only,
1873 * needs parent already locked. Doesn't follow mounts.
1874 * SMP-safe.
1875 */
1876 static struct dentry *lookup_hash(struct nameidata *nd)
1877 {
1878 return __lookup_hash(&nd->last, nd->path.dentry, nd);
1879 }
1880
1881 /**
1882 * lookup_one_len - filesystem helper to lookup single pathname component
1883 * @name: pathname component to lookup
1884 * @base: base directory to lookup from
1885 * @len: length of @name, in characters
1886 *
1887 * Note that this routine is purely a helper for filesystem usage and should
1888 * not be called by generic code. Also note that by using this function the
1889 * nameidata argument is passed to the filesystem methods and a filesystem
1890 * using this helper needs to be prepared for that.
1891 */
1892 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
1893 {
1894 struct qstr this;
1895 unsigned int c;
1896 int err;
1897
1898 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
1899
1900 this.name = name;
1901 this.len = len;
1902 this.hash = full_name_hash(name, len);
1903 if (!len)
1904 return ERR_PTR(-EACCES);
1905
1906 while (len--) {
1907 c = *(const unsigned char *)name++;
1908 if (c == '/' || c == '\0')
1909 return ERR_PTR(-EACCES);
1910 }
1911 /*
1912 * See if the low-level filesystem might want
1913 * to use its own hash..
1914 */
1915 if (base->d_flags & DCACHE_OP_HASH) {
1916 int err = base->d_op->d_hash(base, base->d_inode, &this);
1917 if (err < 0)
1918 return ERR_PTR(err);
1919 }
1920
1921 err = inode_permission(base->d_inode, MAY_EXEC);
1922 if (err)
1923 return ERR_PTR(err);
1924
1925 return __lookup_hash(&this, base, NULL);
1926 }
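/*
 * A sketch of a filesystem-internal caller: look up a single child name
 * under a directory dentry while holding its i_mutex, as the
 * WARN_ON_ONCE() above expects.  The wrapper and its calling convention
 * are illustrative only.
 */
static struct dentry *example_child(struct dentry *dir, const char *name)
{
	struct dentry *child;

	mutex_lock(&dir->d_inode->i_mutex);
	child = lookup_one_len(name, dir, strlen(name));
	mutex_unlock(&dir->d_inode->i_mutex);
	return child;	/* ERR_PTR() on failure; dput() it when done */
}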
1927
1928 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
1929 struct path *path, int *empty)
1930 {
1931 struct nameidata nd;
1932 char *tmp = getname_flags(name, flags, empty);
1933 int err = PTR_ERR(tmp);
1934 if (!IS_ERR(tmp)) {
1935
1936 BUG_ON(flags & LOOKUP_PARENT);
1937
1938 err = do_path_lookup(dfd, tmp, flags, &nd);
1939 putname(tmp);
1940 if (!err)
1941 *path = nd.path;
1942 }
1943 return err;
1944 }
1945
1946 int user_path_at(int dfd, const char __user *name, unsigned flags,
1947 struct path *path)
1948 {
1949 return user_path_at_empty(dfd, name, flags, path, NULL);
1950 }
1951
1952 static int user_path_parent(int dfd, const char __user *path,
1953 struct nameidata *nd, char **name)
1954 {
1955 char *s = getname(path);
1956 int error;
1957
1958 if (IS_ERR(s))
1959 return PTR_ERR(s);
1960
1961 error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
1962 if (error)
1963 putname(s);
1964 else
1965 *name = s;
1966
1967 return error;
1968 }
1969
1970 /*
1971 * It's inline, so penalty for filesystems that don't use sticky bit is
1972 * minimal.
1973 */
1974 static inline int check_sticky(struct inode *dir, struct inode *inode)
1975 {
1976 kuid_t fsuid = current_fsuid();
1977
1978 if (!(dir->i_mode & S_ISVTX))
1979 return 0;
1980 if (uid_eq(inode->i_uid, fsuid))
1981 return 0;
1982 if (uid_eq(dir->i_uid, fsuid))
1983 return 0;
1984 return !inode_capable(inode, CAP_FOWNER);
1985 }
1986
1987 /*
1988 * Check whether we can remove a link victim from directory dir, check
1989 * whether the type of victim is right.
1990 * 1. We can't do it if dir is read-only (done in permission())
1991 * 2. We should have write and exec permissions on dir
1992 * 3. We can't remove anything from append-only dir
1993 * 4. We can't do anything with immutable dir (done in permission())
1994 * 5. If the sticky bit on dir is set we should either
1995 * a. be owner of dir, or
1996 * b. be owner of victim, or
1997 * c. have CAP_FOWNER capability
1998 * 6. If the victim is append-only or immutable we can't do anything with
1999 * links pointing to it.
2000 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
2001 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
2002 * 9. We can't remove a root or mountpoint.
2003 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
2004 * nfs_async_unlink().
2005 */
2006 static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
2007 {
2008 int error;
2009
2010 if (!victim->d_inode)
2011 return -ENOENT;
2012
2013 BUG_ON(victim->d_parent->d_inode != dir);
2014 audit_inode_child(victim, dir);
2015
2016 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
2017 if (error)
2018 return error;
2019 if (IS_APPEND(dir))
2020 return -EPERM;
2021 if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) ||
2022 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
2023 return -EPERM;
2024 if (isdir) {
2025 if (!S_ISDIR(victim->d_inode->i_mode))
2026 return -ENOTDIR;
2027 if (IS_ROOT(victim))
2028 return -EBUSY;
2029 } else if (S_ISDIR(victim->d_inode->i_mode))
2030 return -EISDIR;
2031 if (IS_DEADDIR(dir))
2032 return -ENOENT;
2033 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
2034 return -EBUSY;
2035 return 0;
2036 }
2037
2038 /* Check whether we can create an object with dentry child in directory
2039 * dir.
2040 * 1. We can't do it if child already exists (open has special treatment for
2041 * this case, but since this helper is inlined it's OK)
2042 * 2. We can't do it if dir is read-only (done in permission())
2043 * 3. We should have write and exec permissions on dir
2044 * 4. We can't do it if dir is immutable (done in permission())
2045 */
2046 static inline int may_create(struct inode *dir, struct dentry *child)
2047 {
2048 if (child->d_inode)
2049 return -EEXIST;
2050 if (IS_DEADDIR(dir))
2051 return -ENOENT;
2052 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
2053 }
2054
2055 /*
2056 * p1 and p2 should be directories on the same fs.
2057 */
2058 struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2059 {
2060 struct dentry *p;
2061
2062 if (p1 == p2) {
2063 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2064 return NULL;
2065 }
2066
2067 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2068
2069 p = d_ancestor(p2, p1);
2070 if (p) {
2071 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
2072 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
2073 return p;
2074 }
2075
2076 p = d_ancestor(p1, p2);
2077 if (p) {
2078 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2079 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2080 return p;
2081 }
2082
2083 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2084 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2085 return NULL;
2086 }
2087
2088 void unlock_rename(struct dentry *p1, struct dentry *p2)
2089 {
2090 mutex_unlock(&p1->d_inode->i_mutex);
2091 if (p1 != p2) {
2092 mutex_unlock(&p2->d_inode->i_mutex);
2093 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2094 }
2095 }
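/*
 * A minimal sketch of how these two are meant to be paired (see
 * sys_renameat() below for the real caller):
 *
 *	trap = lock_rename(new_dir, old_dir);
 *	old_dentry = lookup_hash(&oldnd);
 *	new_dentry = lookup_hash(&newnd);
 *	... refuse the rename if either dentry equals "trap" ...
 *	error = vfs_rename(old_dir->d_inode, old_dentry,
 *			   new_dir->d_inode, new_dentry);
 *	unlock_rename(new_dir, old_dir);
 *
 * A non-NULL "trap" means one directory is an ancestor of the other;
 * a rename whose source or target resolves to that dentry would create
 * a loop, so callers must check for it before proceeding.
 */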
2096
2097 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2098 struct nameidata *nd)
2099 {
2100 int error = may_create(dir, dentry);
2101
2102 if (error)
2103 return error;
2104
2105 if (!dir->i_op->create)
2106 return -EACCES; /* shouldn't it be ENOSYS? */
2107 mode &= S_IALLUGO;
2108 mode |= S_IFREG;
2109 error = security_inode_create(dir, dentry, mode);
2110 if (error)
2111 return error;
2112 error = dir->i_op->create(dir, dentry, mode, nd);
2113 if (!error)
2114 fsnotify_create(dir, dentry);
2115 return error;
2116 }
2117
2118 static int may_open(struct path *path, int acc_mode, int flag)
2119 {
2120 struct dentry *dentry = path->dentry;
2121 struct inode *inode = dentry->d_inode;
2122 int error;
2123
2124 /* O_PATH? */
2125 if (!acc_mode)
2126 return 0;
2127
2128 if (!inode)
2129 return -ENOENT;
2130
2131 switch (inode->i_mode & S_IFMT) {
2132 case S_IFLNK:
2133 return -ELOOP;
2134 case S_IFDIR:
2135 if (acc_mode & MAY_WRITE)
2136 return -EISDIR;
2137 break;
2138 case S_IFBLK:
2139 case S_IFCHR:
2140 if (path->mnt->mnt_flags & MNT_NODEV)
2141 return -EACCES;
2142 /*FALLTHRU*/
2143 case S_IFIFO:
2144 case S_IFSOCK:
2145 flag &= ~O_TRUNC;
2146 break;
2147 }
2148
2149 error = inode_permission(inode, acc_mode);
2150 if (error)
2151 return error;
2152
2153 /*
2154 * An append-only file must be opened in append mode for writing.
2155 */
2156 if (IS_APPEND(inode)) {
2157 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
2158 return -EPERM;
2159 if (flag & O_TRUNC)
2160 return -EPERM;
2161 }
2162
2163 /* O_NOATIME can only be set by the owner or superuser */
2164 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
2165 return -EPERM;
2166
2167 return 0;
2168 }
2169
2170 static int handle_truncate(struct file *filp)
2171 {
2172 struct path *path = &filp->f_path;
2173 struct inode *inode = path->dentry->d_inode;
2174 int error = get_write_access(inode);
2175 if (error)
2176 return error;
2177 /*
2178 * Refuse to truncate files with mandatory locks held on them.
2179 */
2180 error = locks_verify_locked(inode);
2181 if (!error)
2182 error = security_path_truncate(path);
2183 if (!error) {
2184 error = do_truncate(path->dentry, 0,
2185 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
2186 filp);
2187 }
2188 put_write_access(inode);
2189 return error;
2190 }
2191
2192 static inline int open_to_namei_flags(int flag)
2193 {
2194 if ((flag & O_ACCMODE) == 3)
2195 flag--;
2196 return flag;
2197 }
2198
2199 /*
2200 * Handle the last step of open()
2201 */
2202 static struct file *do_last(struct nameidata *nd, struct path *path,
2203 const struct open_flags *op, const char *pathname)
2204 {
2205 struct dentry *dir = nd->path.dentry;
2206 struct dentry *dentry;
2207 int open_flag = op->open_flag;
2208 int will_truncate = open_flag & O_TRUNC;
2209 int want_write = 0;
2210 int acc_mode = op->acc_mode;
2211 struct file *filp;
2212 struct inode *inode;
2213 int symlink_ok = 0;
2214 struct path save_parent = { .dentry = NULL, .mnt = NULL };
2215 bool retried = false;
2216 int error;
2217
2218 nd->flags &= ~LOOKUP_PARENT;
2219 nd->flags |= op->intent;
2220
2221 switch (nd->last_type) {
2222 case LAST_DOTDOT:
2223 case LAST_DOT:
2224 error = handle_dots(nd, nd->last_type);
2225 if (error)
2226 return ERR_PTR(error);
2227 /* fallthrough */
2228 case LAST_ROOT:
2229 error = complete_walk(nd);
2230 if (error)
2231 return ERR_PTR(error);
2232 audit_inode(pathname, nd->path.dentry);
2233 if (open_flag & O_CREAT) {
2234 error = -EISDIR;
2235 goto exit;
2236 }
2237 goto ok;
2238 case LAST_BIND:
2239 error = complete_walk(nd);
2240 if (error)
2241 return ERR_PTR(error);
2242 audit_inode(pathname, dir);
2243 goto ok;
2244 }
2245
2246 if (!(open_flag & O_CREAT)) {
2247 if (nd->last.name[nd->last.len])
2248 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2249 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
2250 symlink_ok = 1;
2251 /* we _can_ be in RCU mode here */
2252 error = lookup_fast(nd, &nd->last, path, &inode);
2253 if (unlikely(error)) {
2254 if (error < 0)
2255 goto exit;
2256
2257 error = lookup_slow(nd, &nd->last, path);
2258 if (error < 0)
2259 goto exit;
2260
2261 inode = path->dentry->d_inode;
2262 }
2263 goto finish_lookup;
2264 }
2265
2266 /* create side of things */
2267 /*
2268 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
2269 * cleared when we got to the last component we are about to look up
2270 */
2271 error = complete_walk(nd);
2272 if (error)
2273 return ERR_PTR(error);
2274
2275 audit_inode(pathname, dir);
2276 error = -EISDIR;
2277 /* trailing slashes? */
2278 if (nd->last.name[nd->last.len])
2279 goto exit;
2280
2281 retry_lookup:
2282 mutex_lock(&dir->d_inode->i_mutex);
2283
2284 dentry = lookup_hash(nd);
2285 error = PTR_ERR(dentry);
2286 if (IS_ERR(dentry)) {
2287 mutex_unlock(&dir->d_inode->i_mutex);
2288 goto exit;
2289 }
2290
2291 path->dentry = dentry;
2292 path->mnt = nd->path.mnt;
2293
2294 /* Negative dentry, just create the file */
2295 if (!dentry->d_inode) {
2296 umode_t mode = op->mode;
2297 if (!IS_POSIXACL(dir->d_inode))
2298 mode &= ~current_umask();
2299 /*
2300 * This write is needed to ensure that a
2301 * rw->ro transition does not occur between
2302 * the time when the file is created and when
2303 * a permanent write count is taken through
2304 * the 'struct file' in nameidata_to_filp().
2305 */
2306 error = mnt_want_write(nd->path.mnt);
2307 if (error)
2308 goto exit_mutex_unlock;
2309 want_write = 1;
2310 /* Don't check for write permission, don't truncate */
2311 open_flag &= ~O_TRUNC;
2312 will_truncate = 0;
2313 acc_mode = MAY_OPEN;
2314 error = security_path_mknod(&nd->path, dentry, mode, 0);
2315 if (error)
2316 goto exit_mutex_unlock;
2317 error = vfs_create(dir->d_inode, dentry, mode, nd);
2318 if (error)
2319 goto exit_mutex_unlock;
2320 mutex_unlock(&dir->d_inode->i_mutex);
2321 dput(nd->path.dentry);
2322 nd->path.dentry = dentry;
2323 goto common;
2324 }
2325
2326 /*
2327 * It already exists.
2328 */
2329 mutex_unlock(&dir->d_inode->i_mutex);
2330 audit_inode(pathname, path->dentry);
2331
2332 error = -EEXIST;
2333 if (open_flag & O_EXCL)
2334 goto exit_dput;
2335
2336 error = follow_managed(path, nd->flags);
2337 if (error < 0)
2338 goto exit_dput;
2339
2340 if (error)
2341 nd->flags |= LOOKUP_JUMPED;
2342
2343 BUG_ON(nd->flags & LOOKUP_RCU);
2344 inode = path->dentry->d_inode;
2345 finish_lookup:
2346 /* we _can_ be in RCU mode here */
2347 error = -ENOENT;
2348 if (!inode) {
2349 path_to_nameidata(path, nd);
2350 goto exit;
2351 }
2352
2353 if (should_follow_link(inode, !symlink_ok)) {
2354 if (nd->flags & LOOKUP_RCU) {
2355 if (unlikely(unlazy_walk(nd, path->dentry))) {
2356 error = -ECHILD;
2357 goto exit;
2358 }
2359 }
2360 BUG_ON(inode != path->dentry->d_inode);
2361 return NULL;
2362 }
2363
2364 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
2365 path_to_nameidata(path, nd);
2366 } else {
2367 save_parent.dentry = nd->path.dentry;
2368 save_parent.mnt = mntget(path->mnt);
2369 nd->path.dentry = path->dentry;
2370
2371 }
2372 nd->inode = inode;
2373 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
2374 error = complete_walk(nd);
2375 if (error) {
2376 path_put(&save_parent);
2377 return ERR_PTR(error);
2378 }
2379 error = -EISDIR;
2380 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
2381 goto exit;
2382 error = -ENOTDIR;
2383 if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
2384 goto exit;
2385 audit_inode(pathname, nd->path.dentry);
2386 ok:
2387 if (!S_ISREG(nd->inode->i_mode))
2388 will_truncate = 0;
2389
2390 if (will_truncate) {
2391 error = mnt_want_write(nd->path.mnt);
2392 if (error)
2393 goto exit;
2394 want_write = 1;
2395 }
2396 common:
2397 error = may_open(&nd->path, acc_mode, open_flag);
2398 if (error)
2399 goto exit;
2400 filp = nameidata_to_filp(nd);
2401 if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
2402 BUG_ON(save_parent.dentry != dir);
2403 path_put(&nd->path);
2404 nd->path = save_parent;
2405 nd->inode = dir->d_inode;
2406 save_parent.mnt = NULL;
2407 save_parent.dentry = NULL;
2408 if (want_write) {
2409 mnt_drop_write(nd->path.mnt);
2410 want_write = 0;
2411 }
2412 retried = true;
2413 goto retry_lookup;
2414 }
2415 if (!IS_ERR(filp)) {
2416 error = ima_file_check(filp, op->acc_mode);
2417 if (error) {
2418 fput(filp);
2419 filp = ERR_PTR(error);
2420 }
2421 }
2422 if (!IS_ERR(filp)) {
2423 if (will_truncate) {
2424 error = handle_truncate(filp);
2425 if (error) {
2426 fput(filp);
2427 filp = ERR_PTR(error);
2428 }
2429 }
2430 }
2431 out:
2432 if (want_write)
2433 mnt_drop_write(nd->path.mnt);
2434 path_put(&save_parent);
2435 terminate_walk(nd);
2436 return filp;
2437
2438 exit_mutex_unlock:
2439 mutex_unlock(&dir->d_inode->i_mutex);
2440 exit_dput:
2441 path_put_conditional(path, nd);
2442 exit:
2443 filp = ERR_PTR(error);
2444 goto out;
2445 }
2446
2447 static struct file *path_openat(int dfd, const char *pathname,
2448 struct nameidata *nd, const struct open_flags *op, int flags)
2449 {
2450 struct file *base = NULL;
2451 struct file *filp;
2452 struct path path;
2453 int error;
2454
2455 filp = get_empty_filp();
2456 if (!filp)
2457 return ERR_PTR(-ENFILE);
2458
2459 filp->f_flags = op->open_flag;
2460 nd->intent.open.file = filp;
2461 nd->intent.open.flags = open_to_namei_flags(op->open_flag);
2462 nd->intent.open.create_mode = op->mode;
2463
2464 error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
2465 if (unlikely(error))
2466 goto out_filp;
2467
2468 current->total_link_count = 0;
2469 error = link_path_walk(pathname, nd);
2470 if (unlikely(error))
2471 goto out_filp;
2472
2473 filp = do_last(nd, &path, op, pathname);
2474 while (unlikely(!filp)) { /* trailing symlink */
2475 struct path link = path;
2476 void *cookie;
2477 if (!(nd->flags & LOOKUP_FOLLOW)) {
2478 path_put_conditional(&path, nd);
2479 path_put(&nd->path);
2480 filp = ERR_PTR(-ELOOP);
2481 break;
2482 }
2483 nd->flags |= LOOKUP_PARENT;
2484 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
2485 error = follow_link(&link, nd, &cookie);
2486 if (unlikely(error))
2487 goto out_filp;
2488 filp = do_last(nd, &path, op, pathname);
2489 put_link(nd, &link, cookie);
2490 }
2491 out:
2492 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
2493 path_put(&nd->root);
2494 if (base)
2495 fput(base);
2496 release_open_intent(nd);
2497 if (filp == ERR_PTR(-EOPENSTALE)) {
2498 if (flags & LOOKUP_RCU)
2499 filp = ERR_PTR(-ECHILD);
2500 else
2501 filp = ERR_PTR(-ESTALE);
2502 }
2503 return filp;
2504
2505 out_filp:
2506 filp = ERR_PTR(error);
2507 goto out;
2508 }
2509
2510 struct file *do_filp_open(int dfd, const char *pathname,
2511 const struct open_flags *op, int flags)
2512 {
2513 struct nameidata nd;
2514 struct file *filp;
2515
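/*
 * Try a lockless RCU walk first; -ECHILD means it had to bail and the
 * lookup is redone with references held, while -ESTALE triggers one
 * more pass with LOOKUP_REVAL to force dentry revalidation.
 */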
2516 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
2517 if (unlikely(filp == ERR_PTR(-ECHILD)))
2518 filp = path_openat(dfd, pathname, &nd, op, flags);
2519 if (unlikely(filp == ERR_PTR(-ESTALE)))
2520 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
2521 return filp;
2522 }
2523
2524 struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
2525 const char *name, const struct open_flags *op, int flags)
2526 {
2527 struct nameidata nd;
2528 struct file *file;
2529
2530 nd.root.mnt = mnt;
2531 nd.root.dentry = dentry;
2532
2533 flags |= LOOKUP_ROOT;
2534
2535 if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
2536 return ERR_PTR(-ELOOP);
2537
2538 file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
2539 if (unlikely(file == ERR_PTR(-ECHILD)))
2540 file = path_openat(-1, name, &nd, op, flags);
2541 if (unlikely(file == ERR_PTR(-ESTALE)))
2542 file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
2543 return file;
2544 }
2545
2546 struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir)
2547 {
2548 struct dentry *dentry = ERR_PTR(-EEXIST);
2549 struct nameidata nd;
2550 int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
2551 if (error)
2552 return ERR_PTR(error);
2553
2554 /*
2555 * Yucky last component or no last component at all?
2556 * (foo/., foo/.., /////)
2557 */
2558 if (nd.last_type != LAST_NORM)
2559 goto out;
2560 nd.flags &= ~LOOKUP_PARENT;
2561 nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
2562 nd.intent.open.flags = O_EXCL;
2563
2564 /*
2565 * Do the final lookup.
2566 */
2567 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2568 dentry = lookup_hash(&nd);
2569 if (IS_ERR(dentry))
2570 goto fail;
2571
2572 if (dentry->d_inode)
2573 goto eexist;
2574 /*
2575 * Special case - lookup gave negative, but... we had foo/bar/
2576 * From the vfs_mknod() POV we just have a negative dentry -
2577 * all is fine. Let's be bastards - you had / on the end, you've
2578 * been asking for a (non-existent) directory. -ENOENT for you.
2579 */
2580 if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
2581 dput(dentry);
2582 dentry = ERR_PTR(-ENOENT);
2583 goto fail;
2584 }
2585 *path = nd.path;
2586 return dentry;
2587 eexist:
2588 dput(dentry);
2589 dentry = ERR_PTR(-EEXIST);
2590 fail:
2591 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2592 out:
2593 path_put(&nd.path);
2594 return dentry;
2595 }
2596 EXPORT_SYMBOL(kern_path_create);
2597
2598 struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
2599 {
2600 char *tmp = getname(pathname);
2601 struct dentry *res;
2602 if (IS_ERR(tmp))
2603 return ERR_CAST(tmp);
2604 res = kern_path_create(dfd, tmp, path, is_dir);
2605 putname(tmp);
2606 return res;
2607 }
2608 EXPORT_SYMBOL(user_path_create);
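/*
 * A minimal sketch of the pattern the creation syscalls below follow
 * (sys_mknodat() and sys_mkdirat() are the real users); note that
 * kern_path_create()/user_path_create() return with the parent's
 * i_mutex held, and it is the caller who drops it:
 *
 *	dentry = user_path_create(dfd, name, &path, 0);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	error = mnt_want_write(path.mnt);
 *	if (!error) {
 *		error = security_path_mknod(&path, dentry, mode, 0);
 *		if (!error)
 *			error = vfs_create(path.dentry->d_inode, dentry,
 *					   mode, NULL);
 *		mnt_drop_write(path.mnt);
 *	}
 *	dput(dentry);
 *	mutex_unlock(&path.dentry->d_inode->i_mutex);
 *	path_put(&path);
 */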
2609
2610 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2611 {
2612 int error = may_create(dir, dentry);
2613
2614 if (error)
2615 return error;
2616
2617 if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
2618 return -EPERM;
2619
2620 if (!dir->i_op->mknod)
2621 return -EPERM;
2622
2623 error = devcgroup_inode_mknod(mode, dev);
2624 if (error)
2625 return error;
2626
2627 error = security_inode_mknod(dir, dentry, mode, dev);
2628 if (error)
2629 return error;
2630
2631 error = dir->i_op->mknod(dir, dentry, mode, dev);
2632 if (!error)
2633 fsnotify_create(dir, dentry);
2634 return error;
2635 }
2636
2637 static int may_mknod(umode_t mode)
2638 {
2639 switch (mode & S_IFMT) {
2640 case S_IFREG:
2641 case S_IFCHR:
2642 case S_IFBLK:
2643 case S_IFIFO:
2644 case S_IFSOCK:
2645 case 0: /* zero mode translates to S_IFREG */
2646 return 0;
2647 case S_IFDIR:
2648 return -EPERM;
2649 default:
2650 return -EINVAL;
2651 }
2652 }
2653
2654 SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
2655 unsigned, dev)
2656 {
2657 struct dentry *dentry;
2658 struct path path;
2659 int error;
2660
2661 if (S_ISDIR(mode))
2662 return -EPERM;
2663
2664 dentry = user_path_create(dfd, filename, &path, 0);
2665 if (IS_ERR(dentry))
2666 return PTR_ERR(dentry);
2667
2668 if (!IS_POSIXACL(path.dentry->d_inode))
2669 mode &= ~current_umask();
2670 error = may_mknod(mode);
2671 if (error)
2672 goto out_dput;
2673 error = mnt_want_write(path.mnt);
2674 if (error)
2675 goto out_dput;
2676 error = security_path_mknod(&path, dentry, mode, dev);
2677 if (error)
2678 goto out_drop_write;
2679 switch (mode & S_IFMT) {
2680 case 0: case S_IFREG:
2681 error = vfs_create(path.dentry->d_inode, dentry, mode, NULL);
2682 break;
2683 case S_IFCHR: case S_IFBLK:
2684 error = vfs_mknod(path.dentry->d_inode, dentry, mode,
2685 new_decode_dev(dev));
2686 break;
2687 case S_IFIFO: case S_IFSOCK:
2688 error = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
2689 break;
2690 }
2691 out_drop_write:
2692 mnt_drop_write(path.mnt);
2693 out_dput:
2694 dput(dentry);
2695 mutex_unlock(&path.dentry->d_inode->i_mutex);
2696 path_put(&path);
2697
2698 return error;
2699 }
2700
2701 SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
2702 {
2703 return sys_mknodat(AT_FDCWD, filename, mode, dev);
2704 }
2705
2706 int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2707 {
2708 int error = may_create(dir, dentry);
2709 unsigned max_links = dir->i_sb->s_max_links;
2710
2711 if (error)
2712 return error;
2713
2714 if (!dir->i_op->mkdir)
2715 return -EPERM;
2716
2717 mode &= (S_IRWXUGO|S_ISVTX);
2718 error = security_inode_mkdir(dir, dentry, mode);
2719 if (error)
2720 return error;
2721
2722 if (max_links && dir->i_nlink >= max_links)
2723 return -EMLINK;
2724
2725 error = dir->i_op->mkdir(dir, dentry, mode);
2726 if (!error)
2727 fsnotify_mkdir(dir, dentry);
2728 return error;
2729 }
2730
2731 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
2732 {
2733 struct dentry *dentry;
2734 struct path path;
2735 int error;
2736
2737 dentry = user_path_create(dfd, pathname, &path, 1);
2738 if (IS_ERR(dentry))
2739 return PTR_ERR(dentry);
2740
2741 if (!IS_POSIXACL(path.dentry->d_inode))
2742 mode &= ~current_umask();
2743 error = mnt_want_write(path.mnt);
2744 if (error)
2745 goto out_dput;
2746 error = security_path_mkdir(&path, dentry, mode);
2747 if (error)
2748 goto out_drop_write;
2749 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
2750 out_drop_write:
2751 mnt_drop_write(path.mnt);
2752 out_dput:
2753 dput(dentry);
2754 mutex_unlock(&path.dentry->d_inode->i_mutex);
2755 path_put(&path);
2756 return error;
2757 }
2758
2759 SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
2760 {
2761 return sys_mkdirat(AT_FDCWD, pathname, mode);
2762 }
2763
2764 /*
2765 * The dentry_unhash() helper will try to drop the dentry early: we
2766 * should have a usage count of 1 if we're the only user of this
2767 * dentry, and if that is true (possibly after pruning the dcache),
2768 * then we drop the dentry now.
2769 *
2770 * A low-level filesystem can, if it chooses, legally
2771 * do a
2772 *
2773 * if (!d_unhashed(dentry))
2774 * return -EBUSY;
2775 *
2776 * if it cannot handle the case of removing a directory
2777 * that is still in use by something else..
2778 */
2779 void dentry_unhash(struct dentry *dentry)
2780 {
2781 shrink_dcache_parent(dentry);
2782 spin_lock(&dentry->d_lock);
2783 if (dentry->d_count == 1)
2784 __d_drop(dentry);
2785 spin_unlock(&dentry->d_lock);
2786 }
2787
2788 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2789 {
2790 int error = may_delete(dir, dentry, 1);
2791
2792 if (error)
2793 return error;
2794
2795 if (!dir->i_op->rmdir)
2796 return -EPERM;
2797
2798 dget(dentry);
2799 mutex_lock(&dentry->d_inode->i_mutex);
2800
2801 error = -EBUSY;
2802 if (d_mountpoint(dentry))
2803 goto out;
2804
2805 error = security_inode_rmdir(dir, dentry);
2806 if (error)
2807 goto out;
2808
2809 shrink_dcache_parent(dentry);
2810 error = dir->i_op->rmdir(dir, dentry);
2811 if (error)
2812 goto out;
2813
2814 dentry->d_inode->i_flags |= S_DEAD;
2815 dont_mount(dentry);
2816
2817 out:
2818 mutex_unlock(&dentry->d_inode->i_mutex);
2819 dput(dentry);
2820 if (!error)
2821 d_delete(dentry);
2822 return error;
2823 }
2824
2825 static long do_rmdir(int dfd, const char __user *pathname)
2826 {
2827 int error = 0;
2828 char *name;
2829 struct dentry *dentry;
2830 struct nameidata nd;
2831
2832 error = user_path_parent(dfd, pathname, &nd, &name);
2833 if (error)
2834 return error;
2835
2836 switch (nd.last_type) {
2837 case LAST_DOTDOT:
2838 error = -ENOTEMPTY;
2839 goto exit1;
2840 case LAST_DOT:
2841 error = -EINVAL;
2842 goto exit1;
2843 case LAST_ROOT:
2844 error = -EBUSY;
2845 goto exit1;
2846 }
2847
2848 nd.flags &= ~LOOKUP_PARENT;
2849
2850 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2851 dentry = lookup_hash(&nd);
2852 error = PTR_ERR(dentry);
2853 if (IS_ERR(dentry))
2854 goto exit2;
2855 if (!dentry->d_inode) {
2856 error = -ENOENT;
2857 goto exit3;
2858 }
2859 error = mnt_want_write(nd.path.mnt);
2860 if (error)
2861 goto exit3;
2862 error = security_path_rmdir(&nd.path, dentry);
2863 if (error)
2864 goto exit4;
2865 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
2866 exit4:
2867 mnt_drop_write(nd.path.mnt);
2868 exit3:
2869 dput(dentry);
2870 exit2:
2871 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2872 exit1:
2873 path_put(&nd.path);
2874 putname(name);
2875 return error;
2876 }
2877
2878 SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
2879 {
2880 return do_rmdir(AT_FDCWD, pathname);
2881 }
2882
2883 int vfs_unlink(struct inode *dir, struct dentry *dentry)
2884 {
2885 int error = may_delete(dir, dentry, 0);
2886
2887 if (error)
2888 return error;
2889
2890 if (!dir->i_op->unlink)
2891 return -EPERM;
2892
2893 mutex_lock(&dentry->d_inode->i_mutex);
2894 if (d_mountpoint(dentry))
2895 error = -EBUSY;
2896 else {
2897 error = security_inode_unlink(dir, dentry);
2898 if (!error) {
2899 error = dir->i_op->unlink(dir, dentry);
2900 if (!error)
2901 dont_mount(dentry);
2902 }
2903 }
2904 mutex_unlock(&dentry->d_inode->i_mutex);
2905
2906 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
2907 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
2908 fsnotify_link_count(dentry->d_inode);
2909 d_delete(dentry);
2910 }
2911
2912 return error;
2913 }
2914
2915 /*
2916 * Make sure that the actual truncation of the file will occur outside its
2917 * directory's i_mutex. Truncate can take a long time if there is a lot of
2918 * writeout happening, and we don't want to prevent access to the directory
2919 * while waiting on the I/O.
2920 */
2921 static long do_unlinkat(int dfd, const char __user *pathname)
2922 {
2923 int error;
2924 char *name;
2925 struct dentry *dentry;
2926 struct nameidata nd;
2927 struct inode *inode = NULL;
2928
2929 error = user_path_parent(dfd, pathname, &nd, &name);
2930 if (error)
2931 return error;
2932
2933 error = -EISDIR;
2934 if (nd.last_type != LAST_NORM)
2935 goto exit1;
2936
2937 nd.flags &= ~LOOKUP_PARENT;
2938
2939 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2940 dentry = lookup_hash(&nd);
2941 error = PTR_ERR(dentry);
2942 if (!IS_ERR(dentry)) {
2943 /* Why not before? Because we want the correct error value */
2944 if (nd.last.name[nd.last.len])
2945 goto slashes;
2946 inode = dentry->d_inode;
2947 if (!inode)
2948 goto slashes;
2949 ihold(inode);
2950 error = mnt_want_write(nd.path.mnt);
2951 if (error)
2952 goto exit2;
2953 error = security_path_unlink(&nd.path, dentry);
2954 if (error)
2955 goto exit3;
2956 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
2957 exit3:
2958 mnt_drop_write(nd.path.mnt);
2959 exit2:
2960 dput(dentry);
2961 }
2962 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2963 if (inode)
2964 iput(inode); /* truncate the inode here */
2965 exit1:
2966 path_put(&nd.path);
2967 putname(name);
2968 return error;
2969
2970 slashes:
2971 error = !dentry->d_inode ? -ENOENT :
2972 S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
2973 goto exit2;
2974 }
2975
2976 SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
2977 {
2978 if ((flag & ~AT_REMOVEDIR) != 0)
2979 return -EINVAL;
2980
2981 if (flag & AT_REMOVEDIR)
2982 return do_rmdir(dfd, pathname);
2983
2984 return do_unlinkat(dfd, pathname);
2985 }
2986
2987 SYSCALL_DEFINE1(unlink, const char __user *, pathname)
2988 {
2989 return do_unlinkat(AT_FDCWD, pathname);
2990 }
2991
2992 int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
2993 {
2994 int error = may_create(dir, dentry);
2995
2996 if (error)
2997 return error;
2998
2999 if (!dir->i_op->symlink)
3000 return -EPERM;
3001
3002 error = security_inode_symlink(dir, dentry, oldname);
3003 if (error)
3004 return error;
3005
3006 error = dir->i_op->symlink(dir, dentry, oldname);
3007 if (!error)
3008 fsnotify_create(dir, dentry);
3009 return error;
3010 }
3011
3012 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
3013 int, newdfd, const char __user *, newname)
3014 {
3015 int error;
3016 char *from;
3017 struct dentry *dentry;
3018 struct path path;
3019
3020 from = getname(oldname);
3021 if (IS_ERR(from))
3022 return PTR_ERR(from);
3023
3024 dentry = user_path_create(newdfd, newname, &path, 0);
3025 error = PTR_ERR(dentry);
3026 if (IS_ERR(dentry))
3027 goto out_putname;
3028
3029 error = mnt_want_write(path.mnt);
3030 if (error)
3031 goto out_dput;
3032 error = security_path_symlink(&path, dentry, from);
3033 if (error)
3034 goto out_drop_write;
3035 error = vfs_symlink(path.dentry->d_inode, dentry, from);
3036 out_drop_write:
3037 mnt_drop_write(path.mnt);
3038 out_dput:
3039 dput(dentry);
3040 mutex_unlock(&path.dentry->d_inode->i_mutex);
3041 path_put(&path);
3042 out_putname:
3043 putname(from);
3044 return error;
3045 }
3046
3047 SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
3048 {
3049 return sys_symlinkat(oldname, AT_FDCWD, newname);
3050 }
3051
3052 int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
3053 {
3054 struct inode *inode = old_dentry->d_inode;
3055 unsigned max_links = dir->i_sb->s_max_links;
3056 int error;
3057
3058 if (!inode)
3059 return -ENOENT;
3060
3061 error = may_create(dir, new_dentry);
3062 if (error)
3063 return error;
3064
3065 if (dir->i_sb != inode->i_sb)
3066 return -EXDEV;
3067
3068 /*
3069 * A link to an append-only or immutable file cannot be created.
3070 */
3071 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3072 return -EPERM;
3073 if (!dir->i_op->link)
3074 return -EPERM;
3075 if (S_ISDIR(inode->i_mode))
3076 return -EPERM;
3077
3078 error = security_inode_link(old_dentry, dir, new_dentry);
3079 if (error)
3080 return error;
3081
3082 mutex_lock(&inode->i_mutex);
3083 /* Make sure we don't allow creating hardlink to an unlinked file */
3084 if (inode->i_nlink == 0)
3085 error = -ENOENT;
3086 else if (max_links && inode->i_nlink >= max_links)
3087 error = -EMLINK;
3088 else
3089 error = dir->i_op->link(old_dentry, dir, new_dentry);
3090 mutex_unlock(&inode->i_mutex);
3091 if (!error)
3092 fsnotify_link(dir, inode, new_dentry);
3093 return error;
3094 }
3095
3096 /*
3097 * Hardlinks are often used in delicate situations. We avoid
3098 * security-related surprises by not following symlinks on the
3099 * newname. --KAB
3100 *
3101 * We don't follow them on the oldname either to be compatible
3102 * with linux 2.0, and to avoid hard-linking to directories
3103 * and other special files. --ADM
3104 */
3105 SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
3106 int, newdfd, const char __user *, newname, int, flags)
3107 {
3108 struct dentry *new_dentry;
3109 struct path old_path, new_path;
3110 int how = 0;
3111 int error;
3112
3113 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
3114 return -EINVAL;
3115 /*
3116 * To use null names we require CAP_DAC_READ_SEARCH.
3117 * This ensures that not everyone will be able to create
3118 * a hardlink using the passed file descriptor.
3119 */
3120 if (flags & AT_EMPTY_PATH) {
3121 if (!capable(CAP_DAC_READ_SEARCH))
3122 return -ENOENT;
3123 how = LOOKUP_EMPTY;
3124 }
3125
3126 if (flags & AT_SYMLINK_FOLLOW)
3127 how |= LOOKUP_FOLLOW;
3128
3129 error = user_path_at(olddfd, oldname, how, &old_path);
3130 if (error)
3131 return error;
3132
3133 new_dentry = user_path_create(newdfd, newname, &new_path, 0);
3134 error = PTR_ERR(new_dentry);
3135 if (IS_ERR(new_dentry))
3136 goto out;
3137
3138 error = -EXDEV;
3139 if (old_path.mnt != new_path.mnt)
3140 goto out_dput;
3141 error = mnt_want_write(new_path.mnt);
3142 if (error)
3143 goto out_dput;
3144 error = security_path_link(old_path.dentry, &new_path, new_dentry);
3145 if (error)
3146 goto out_drop_write;
3147 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
3148 out_drop_write:
3149 mnt_drop_write(new_path.mnt);
3150 out_dput:
3151 dput(new_dentry);
3152 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
3153 path_put(&new_path);
3154 out:
3155 path_put(&old_path);
3156
3157 return error;
3158 }
3159
3160 SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
3161 {
3162 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
3163 }
3164
3165 /*
3166 * The worst of all namespace operations - renaming directory. "Perverted"
3167 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
3168 * Problems:
3169 * a) we can get into loop creation. Check is done in is_subdir().
3170 * b) race potential - two innocent renames can create a loop together.
3171 * That's where 4.4 screws up. Current fix: serialization on
3172 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
3173 * story.
3174 * c) we have to lock _three_ objects - parents and victim (if it exists).
3175 * And that - after we got ->i_mutex on parents (until then we don't know
3176 * whether the target exists). Solution: try to be smart with locking
3177 * order for inodes. We rely on the fact that tree topology may change
3178 * only under ->s_vfs_rename_mutex _and_ that parent of the object we
3179 * move will be locked. Thus we can rank directories by the tree
3180 * (ancestors first) and rank all non-directories after them.
3181 * That works since everybody except rename does "lock parent, lookup,
3182 * lock child" and rename is under ->s_vfs_rename_mutex.
3183 * HOWEVER, it relies on the assumption that any object with ->lookup()
3184 * has no more than 1 dentry. If "hybrid" objects ever appear,
3185 * we'd better make sure that there's no link(2) for them.
3186 * d) conversion from fhandle to dentry may come in the wrong moment - when
3187 * we are removing the target. Solution: we will have to grab ->i_mutex
3188 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
3189 * ->i_mutex on parents, which works but leads to some truly excessive
3190 * locking].
3191 */
3192 static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3193 struct inode *new_dir, struct dentry *new_dentry)
3194 {
3195 int error = 0;
3196 struct inode *target = new_dentry->d_inode;
3197 unsigned max_links = new_dir->i_sb->s_max_links;
3198
3199 /*
3200 * If we are going to change the parent - check write permissions,
3201 * we'll need to flip '..'.
3202 */
3203 if (new_dir != old_dir) {
3204 error = inode_permission(old_dentry->d_inode, MAY_WRITE);
3205 if (error)
3206 return error;
3207 }
3208
3209 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
3210 if (error)
3211 return error;
3212
3213 dget(new_dentry);
3214 if (target)
3215 mutex_lock(&target->i_mutex);
3216
3217 error = -EBUSY;
3218 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
3219 goto out;
3220
3221 error = -EMLINK;
3222 if (max_links && !target && new_dir != old_dir &&
3223 new_dir->i_nlink >= max_links)
3224 goto out;
3225
3226 if (target)
3227 shrink_dcache_parent(new_dentry);
3228 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
3229 if (error)
3230 goto out;
3231
3232 if (target) {
3233 target->i_flags |= S_DEAD;
3234 dont_mount(new_dentry);
3235 }
3236 out:
3237 if (target)
3238 mutex_unlock(&target->i_mutex);
3239 dput(new_dentry);
3240 if (!error)
3241 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3242 d_move(old_dentry, new_dentry);
3243 return error;
3244 }
3245
3246 static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
3247 struct inode *new_dir, struct dentry *new_dentry)
3248 {
3249 struct inode *target = new_dentry->d_inode;
3250 int error;
3251
3252 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
3253 if (error)
3254 return error;
3255
3256 dget(new_dentry);
3257 if (target)
3258 mutex_lock(&target->i_mutex);
3259
3260 error = -EBUSY;
3261 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
3262 goto out;
3263
3264 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
3265 if (error)
3266 goto out;
3267
3268 if (target)
3269 dont_mount(new_dentry);
3270 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3271 d_move(old_dentry, new_dentry);
3272 out:
3273 if (target)
3274 mutex_unlock(&target->i_mutex);
3275 dput(new_dentry);
3276 return error;
3277 }
3278
3279 int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
3280 struct inode *new_dir, struct dentry *new_dentry)
3281 {
3282 int error;
3283 int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
3284 const unsigned char *old_name;
3285
3286 if (old_dentry->d_inode == new_dentry->d_inode)
3287 return 0;
3288
3289 error = may_delete(old_dir, old_dentry, is_dir);
3290 if (error)
3291 return error;
3292
3293 if (!new_dentry->d_inode)
3294 error = may_create(new_dir, new_dentry);
3295 else
3296 error = may_delete(new_dir, new_dentry, is_dir);
3297 if (error)
3298 return error;
3299
3300 if (!old_dir->i_op->rename)
3301 return -EPERM;
3302
3303 old_name = fsnotify_oldname_init(old_dentry->d_name.name);
3304
3305 if (is_dir)
3306 error = vfs_rename_dir(old_dir, old_dentry, new_dir, new_dentry);
3307 else
3308 error = vfs_rename_other(old_dir, old_dentry, new_dir, new_dentry);
3309 if (!error)
3310 fsnotify_move(old_dir, new_dir, old_name, is_dir,
3311 new_dentry->d_inode, old_dentry);
3312 fsnotify_oldname_free(old_name);
3313
3314 return error;
3315 }
3316
3317 SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
3318 int, newdfd, const char __user *, newname)
3319 {
3320 struct dentry *old_dir, *new_dir;
3321 struct dentry *old_dentry, *new_dentry;
3322 struct dentry *trap;
3323 struct nameidata oldnd, newnd;
3324 char *from;
3325 char *to;
3326 int error;
3327
3328 error = user_path_parent(olddfd, oldname, &oldnd, &from);
3329 if (error)
3330 goto exit;
3331
3332 error = user_path_parent(newdfd, newname, &newnd, &to);
3333 if (error)
3334 goto exit1;
3335
3336 error = -EXDEV;
3337 if (oldnd.path.mnt != newnd.path.mnt)
3338 goto exit2;
3339
3340 old_dir = oldnd.path.dentry;
3341 error = -EBUSY;
3342 if (oldnd.last_type != LAST_NORM)
3343 goto exit2;
3344
3345 new_dir = newnd.path.dentry;
3346 if (newnd.last_type != LAST_NORM)
3347 goto exit2;
3348
3349 oldnd.flags &= ~LOOKUP_PARENT;
3350 newnd.flags &= ~LOOKUP_PARENT;
3351 newnd.flags |= LOOKUP_RENAME_TARGET;
3352
3353 trap = lock_rename(new_dir, old_dir);
3354
3355 old_dentry = lookup_hash(&oldnd);
3356 error = PTR_ERR(old_dentry);
3357 if (IS_ERR(old_dentry))
3358 goto exit3;
3359 /* source must exist */
3360 error = -ENOENT;
3361 if (!old_dentry->d_inode)
3362 goto exit4;
3363 /* unless the source is a directory, trailing slashes give -ENOTDIR */
3364 if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
3365 error = -ENOTDIR;
3366 if (oldnd.last.name[oldnd.last.len])
3367 goto exit4;
3368 if (newnd.last.name[newnd.last.len])
3369 goto exit4;
3370 }
3371 /* source should not be ancestor of target */
3372 error = -EINVAL;
3373 if (old_dentry == trap)
3374 goto exit4;
3375 new_dentry = lookup_hash(&newnd);
3376 error = PTR_ERR(new_dentry);
3377 if (IS_ERR(new_dentry))
3378 goto exit4;
3379 /* target should not be an ancestor of source */
3380 error = -ENOTEMPTY;
3381 if (new_dentry == trap)
3382 goto exit5;
3383
3384 error = mnt_want_write(oldnd.path.mnt);
3385 if (error)
3386 goto exit5;
3387 error = security_path_rename(&oldnd.path, old_dentry,
3388 &newnd.path, new_dentry);
3389 if (error)
3390 goto exit6;
3391 error = vfs_rename(old_dir->d_inode, old_dentry,
3392 new_dir->d_inode, new_dentry);
3393 exit6:
3394 mnt_drop_write(oldnd.path.mnt);
3395 exit5:
3396 dput(new_dentry);
3397 exit4:
3398 dput(old_dentry);
3399 exit3:
3400 unlock_rename(new_dir, old_dir);
3401 exit2:
3402 path_put(&newnd.path);
3403 putname(to);
3404 exit1:
3405 path_put(&oldnd.path);
3406 putname(from);
3407 exit:
3408 return error;
3409 }
3410
3411 SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
3412 {
3413 return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
3414 }
3415
3416 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
3417 {
3418 int len;
3419
3420 len = PTR_ERR(link);
3421 if (IS_ERR(link))
3422 goto out;
3423
3424 len = strlen(link);
3425 if (len > (unsigned) buflen)
3426 len = buflen;
3427 if (copy_to_user(buffer, link, len))
3428 len = -EFAULT;
3429 out:
3430 return len;
3431 }
3432
3433 /*
3434 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
3435 * have ->follow_link() touching nd only in nd_set_link(). Using (or not
3436 * using) it for any given inode is up to the filesystem.
3437 */
3438 int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
3439 {
3440 struct nameidata nd;
3441 void *cookie;
3442 int res;
3443
3444 nd.depth = 0;
3445 cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
3446 if (IS_ERR(cookie))
3447 return PTR_ERR(cookie);
3448
3449 res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
3450 if (dentry->d_inode->i_op->put_link)
3451 dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
3452 return res;
3453 }
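/*
 * Typical wiring for filesystems that keep the symlink body in the
 * page cache (page_symlink_inode_operations below is exactly this):
 *
 *	.readlink	= generic_readlink,
 *	.follow_link	= page_follow_link_light,
 *	.put_link	= page_put_link,
 */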
3454
3455 int vfs_follow_link(struct nameidata *nd, const char *link)
3456 {
3457 return __vfs_follow_link(nd, link);
3458 }
3459
3460 /* get the link contents into pagecache */
3461 static char *page_getlink(struct dentry *dentry, struct page **ppage)
3462 {
3463 char *kaddr;
3464 struct page *page;
3465 struct address_space *mapping = dentry->d_inode->i_mapping;
3466 page = read_mapping_page(mapping, 0, NULL);
3467 if (IS_ERR(page))
3468 return (char *)page;
3469 *ppage = page;
3470 kaddr = kmap(page);
3471 nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
3472 return kaddr;
3473 }
3474
3475 int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
3476 {
3477 struct page *page = NULL;
3478 char *s = page_getlink(dentry, &page);
3479 int res = vfs_readlink(dentry, buffer, buflen, s);
3480 if (page) {
3481 kunmap(page);
3482 page_cache_release(page);
3483 }
3484 return res;
3485 }
3486
3487 void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
3488 {
3489 struct page *page = NULL;
3490 nd_set_link(nd, page_getlink(dentry, &page));
3491 return page;
3492 }
3493
3494 void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
3495 {
3496 struct page *page = cookie;
3497
3498 if (page) {
3499 kunmap(page);
3500 page_cache_release(page);
3501 }
3502 }
3503
3504 /*
3505 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
3506 */
3507 int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
3508 {
3509 struct address_space *mapping = inode->i_mapping;
3510 struct page *page;
3511 void *fsdata;
3512 int err;
3513 char *kaddr;
3514 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
3515 if (nofs)
3516 flags |= AOP_FLAG_NOFS;
3517
3518 retry:
3519 err = pagecache_write_begin(NULL, mapping, 0, len-1,
3520 flags, &page, &fsdata);
3521 if (err)
3522 goto fail;
3523
3524 kaddr = kmap_atomic(page);
3525 memcpy(kaddr, symname, len-1);
3526 kunmap_atomic(kaddr);
3527
3528 err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
3529 page, fsdata);
3530 if (err < 0)
3531 goto fail;
3532 if (err < len-1)
3533 goto retry;
3534
3535 mark_inode_dirty(inode);
3536 return 0;
3537 fail:
3538 return err;
3539 }
3540
3541 int page_symlink(struct inode *inode, const char *symname, int len)
3542 {
3543 return __page_symlink(inode, symname, len,
3544 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
3545 }
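/*
 * A minimal sketch of how a filesystem's ->symlink() method typically
 * uses the helper above; foo_symlink() and foo_new_inode() are
 * hypothetical stand-ins, not helpers defined in this file:
 *
 *	static int foo_symlink(struct inode *dir, struct dentry *dentry,
 *			       const char *symname)
 *	{
 *		struct inode *inode = foo_new_inode(dir, S_IFLNK | S_IRWXUGO);
 *		int err;
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		inode->i_op = &page_symlink_inode_operations;
 *		err = page_symlink(inode, symname, strlen(symname) + 1);
 *		if (err) {
 *			iput(inode);
 *			return err;
 *		}
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 *
 * where the length passed in includes the trailing NUL, matching the
 * "len-1" copy done in __page_symlink() above.
 */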
3546
3547 const struct inode_operations page_symlink_inode_operations = {
3548 .readlink = generic_readlink,
3549 .follow_link = page_follow_link_light,
3550 .put_link = page_put_link,
3551 };
3552
3553 EXPORT_SYMBOL(user_path_at);
3554 EXPORT_SYMBOL(follow_down_one);
3555 EXPORT_SYMBOL(follow_down);
3556 EXPORT_SYMBOL(follow_up);
3557 EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
3558 EXPORT_SYMBOL(getname);
3559 EXPORT_SYMBOL(lock_rename);
3560 EXPORT_SYMBOL(lookup_one_len);
3561 EXPORT_SYMBOL(page_follow_link_light);
3562 EXPORT_SYMBOL(page_put_link);
3563 EXPORT_SYMBOL(page_readlink);
3564 EXPORT_SYMBOL(__page_symlink);
3565 EXPORT_SYMBOL(page_symlink);
3566 EXPORT_SYMBOL(page_symlink_inode_operations);
3567 EXPORT_SYMBOL(kern_path);
3568 EXPORT_SYMBOL(vfs_path_lookup);
3569 EXPORT_SYMBOL(inode_permission);
3570 EXPORT_SYMBOL(unlock_rename);
3571 EXPORT_SYMBOL(vfs_create);
3572 EXPORT_SYMBOL(vfs_follow_link);
3573 EXPORT_SYMBOL(vfs_link);
3574 EXPORT_SYMBOL(vfs_mkdir);
3575 EXPORT_SYMBOL(vfs_mknod);
3576 EXPORT_SYMBOL(generic_permission);
3577 EXPORT_SYMBOL(vfs_readlink);
3578 EXPORT_SYMBOL(vfs_rename);
3579 EXPORT_SYMBOL(vfs_rmdir);
3580 EXPORT_SYMBOL(vfs_symlink);
3581 EXPORT_SYMBOL(vfs_unlink);
3582 EXPORT_SYMBOL(dentry_unhash);
3583 EXPORT_SYMBOL(generic_readlink);