1/*
2 * linux/fs/namei.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * Some corrections by tytso.
9 */
10
11/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
12 * lookup logic.
13 */
14/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
15 */
16
17#include <linux/init.h>
18#include <linux/export.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/fs.h>
22#include <linux/namei.h>
23#include <linux/pagemap.h>
24#include <linux/fsnotify.h>
25#include <linux/personality.h>
26#include <linux/security.h>
27#include <linux/ima.h>
28#include <linux/syscalls.h>
29#include <linux/mount.h>
30#include <linux/audit.h>
31#include <linux/capability.h>
32#include <linux/file.h>
33#include <linux/fcntl.h>
34#include <linux/device_cgroup.h>
35#include <linux/fs_struct.h>
36#include <linux/posix_acl.h>
37#include <asm/uaccess.h>
38
39#include "internal.h"
40#include "mount.h"
41
42/* [Feb-1997 T. Schoebel-Theuer]
43 * Fundamental changes in the pathname lookup mechanisms (namei)
44 * were necessary because of omirr. The reason is that omirr needs
45 * to know the _real_ pathname, not the user-supplied one, in case
46 * of symlinks (and also when transname replacements occur).
47 *
48 * The new code replaces the old recursive symlink resolution with
49 * an iterative one (in case of non-nested symlink chains). It does
50 * this with calls to <fs>_follow_link().
51 * As a side effect, dir_namei(), _namei() and follow_link() are now
52 * replaced with a single function lookup_dentry() that can handle all
53 * the special cases of the former code.
54 *
55 * With the new dcache, the pathname is stored at each inode, at least as
56 * long as the refcount of the inode is positive. As a side effect, the
57 * size of the dcache depends on the inode cache and thus is dynamic.
58 *
59 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
60 * resolution to correspond with current state of the code.
61 *
62 * Note that the symlink resolution is not *completely* iterative.
63 * There is still a significant amount of tail- and mid- recursion in
64 * the algorithm. Also, note that <fs>_readlink() is not used in
65 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
66 * may return different results than <fs>_follow_link(). Many virtual
67 * filesystems (including /proc) exhibit this behavior.
68 */
69
70/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
71 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
72 * and the name already exists in form of a symlink, try to create the new
73 * name indicated by the symlink. The old code always complained that the
74 * name already exists, due to not following the symlink even if its target
 75 * is nonexistent. The new semantics also affects mknod() and link() when
76 * the name is a symlink pointing to a non-existent name.
77 *
78 * I don't know which semantics is the right one, since I have no access
79 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
80 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
81 * "old" one. Personally, I think the new semantics is much more logical.
82 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
 83 * file does succeed in both HP-UX and SunOS, but not in Solaris
 84 * or under the old Linux semantics.
85 */
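/*
 * Illustrative userspace sketch of the situation described above (not part
 * of the original source; which outcome you get depends on which of the two
 * semantics is implemented):
 *
 *	symlink("target-that-does-not-exist", "name");
 *	fd = open("name", O_CREAT | O_EXCL, 0644);
 *	// "old" semantics: fails with EEXIST, since "name" itself exists
 *	// "new" semantics: tries to create the file the symlink points to
 */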
86
87/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
88 * semantics. See the comments in "open_namei" and "do_link" below.
89 *
90 * [10-Sep-98 Alan Modra] Another symlink change.
91 */
92
93/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
94 * inside the path - always follow.
95 * in the last component in creation/removal/renaming - never follow.
96 * if LOOKUP_FOLLOW passed - follow.
97 * if the pathname has trailing slashes - follow.
98 * otherwise - don't follow.
99 * (applied in that order).
100 *
 101 * [Jun 2000 AV] Inconsistent behaviour of open() when flags == O_CREAT was
 102 * restored for 2.4. This is the last surviving part of an old 4.2BSD bug.
 103 * During 2.4 we need to fix the userland stuff depending on it -
104 * hopefully we will be able to get rid of that wart in 2.5. So far only
105 * XEmacs seems to be relying on it...
106 */
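/*
 * Illustrative examples of the rules above (userspace sketch, not part of
 * the original source), with "link" being a symbolic link:
 *
 *	stat("link", &st);	// lookup uses LOOKUP_FOLLOW: the link is followed
 *	lstat("link", &st);	// no LOOKUP_FOLLOW: the link itself is examined
 *	unlink("link");		// removal of the last component: never followed,
 *				// the symlink itself is removed
 */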
107/*
108 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
109 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
110 * any extra contention...
111 */
112
113/* In order to reduce some races, while at the same time doing additional
114 * checking and hopefully speeding things up, we copy filenames to the
 115 * kernel data space before using them.
116 *
117 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
118 * PATH_MAX includes the nul terminator --RR.
119 */
120static char *getname_flags(const char __user *filename, int flags, int *empty)
121{
122 char *result = __getname(), *err;
123 int len;
124
125 if (unlikely(!result))
126 return ERR_PTR(-ENOMEM);
127
128 len = strncpy_from_user(result, filename, PATH_MAX);
129 err = ERR_PTR(len);
130 if (unlikely(len < 0))
131 goto error;
132
133 /* The empty path is special. */
134 if (unlikely(!len)) {
135 if (empty)
136 *empty = 1;
137 err = ERR_PTR(-ENOENT);
138 if (!(flags & LOOKUP_EMPTY))
139 goto error;
140 }
141
142 err = ERR_PTR(-ENAMETOOLONG);
143 if (likely(len < PATH_MAX)) {
144 audit_getname(result);
145 return result;
146 }
147
148error:
149 __putname(result);
150 return err;
151}
152
153char *getname(const char __user * filename)
154{
155 return getname_flags(filename, 0, NULL);
156}
157
158#ifdef CONFIG_AUDITSYSCALL
159void putname(const char *name)
160{
161 if (unlikely(!audit_dummy_context()))
162 audit_putname(name);
163 else
164 __putname(name);
165}
166EXPORT_SYMBOL(putname);
167#endif
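/*
 * A minimal sketch of the usual calling pattern for getname()/putname()
 * (illustrative only, not part of the original source; "do_something_with"
 * stands in for whatever the caller actually does with the name):
 *
 *	char *name = getname(user_pathname);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	error = do_something_with(name);
 *	putname(name);
 *	return error;
 */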
168
169static int check_acl(struct inode *inode, int mask)
170{
171#ifdef CONFIG_FS_POSIX_ACL
172 struct posix_acl *acl;
173
174 if (mask & MAY_NOT_BLOCK) {
175 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
176 if (!acl)
177 return -EAGAIN;
178 /* no ->get_acl() calls in RCU mode... */
179 if (acl == ACL_NOT_CACHED)
180 return -ECHILD;
181 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
182 }
183
184 acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
185
186 /*
 187 * A filesystem can force an ACL callback by just never filling the
188 * ACL cache. But normally you'd fill the cache either at inode
189 * instantiation time, or on the first ->get_acl call.
190 *
191 * If the filesystem doesn't have a get_acl() function at all, we'll
192 * just create the negative cache entry.
193 */
194 if (acl == ACL_NOT_CACHED) {
195 if (inode->i_op->get_acl) {
196 acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS);
197 if (IS_ERR(acl))
198 return PTR_ERR(acl);
199 } else {
200 set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
201 return -EAGAIN;
202 }
203 }
204
205 if (acl) {
206 int error = posix_acl_permission(inode, acl, mask);
207 posix_acl_release(acl);
208 return error;
209 }
210#endif
211
212 return -EAGAIN;
213}
214
215/*
216 * This does the basic permission checking
217 */
218static int acl_permission_check(struct inode *inode, int mask)
219{
220 unsigned int mode = inode->i_mode;
221
222 if (likely(uid_eq(current_fsuid(), inode->i_uid)))
223 mode >>= 6;
224 else {
225 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
226 int error = check_acl(inode, mask);
227 if (error != -EAGAIN)
228 return error;
229 }
230
231 if (in_group_p(inode->i_gid))
232 mode >>= 3;
233 }
234
235 /*
236 * If the DACs are ok we don't need any capability check.
237 */
238 if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
239 return 0;
240 return -EACCES;
241}
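/*
 * Worked example for acl_permission_check() above (illustrative, not part of
 * the original source). MAY_READ, MAY_WRITE and MAY_EXEC have the values 4,
 * 2 and 1, so after the shift they line up with the low "rwx" mode bits:
 *
 *	mode = 0750, caller is the owner, mask = MAY_READ | MAY_WRITE:
 *		mode >>= 6		-> low bits are rwx = 7
 *		mask & ~mode & 7 == 0	-> access granted
 *
 *	mode = 0750, caller is only in the owning group, mask = MAY_WRITE:
 *		mode >>= 3		-> low bits are r-x = 5
 *		mask & ~mode & 7 == 2	-> -EACCES
 */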
242
243/**
244 * generic_permission - check for access rights on a Posix-like filesystem
245 * @inode: inode to check access rights for
246 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
247 *
248 * Used to check for read/write/execute permissions on a file.
249 * We use "fsuid" for this, letting us set arbitrary permissions
250 * for filesystem access without changing the "normal" uids which
251 * are used for other things.
252 *
253 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
254 * request cannot be satisfied (eg. requires blocking or too much complexity).
255 * It would then be called again in ref-walk mode.
256 */
257int generic_permission(struct inode *inode, int mask)
258{
259 int ret;
260
261 /*
262 * Do the basic permission checks.
263 */
264 ret = acl_permission_check(inode, mask);
265 if (ret != -EACCES)
266 return ret;
267
268 if (S_ISDIR(inode->i_mode)) {
269 /* DACs are overridable for directories */
270 if (inode_capable(inode, CAP_DAC_OVERRIDE))
271 return 0;
272 if (!(mask & MAY_WRITE))
273 if (inode_capable(inode, CAP_DAC_READ_SEARCH))
274 return 0;
275 return -EACCES;
276 }
277 /*
278 * Read/write DACs are always overridable.
279 * Executable DACs are overridable when there is
280 * at least one exec bit set.
281 */
282 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
283 if (inode_capable(inode, CAP_DAC_OVERRIDE))
284 return 0;
285
286 /*
287 * Searching includes executable on directories, else just read.
288 */
289 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
290 if (mask == MAY_READ)
291 if (inode_capable(inode, CAP_DAC_READ_SEARCH))
292 return 0;
293
294 return -EACCES;
295}
296
297/*
298 * We _really_ want to just do "generic_permission()" without
299 * even looking at the inode->i_op values. So we keep a cache
300 * flag in inode->i_opflags, that says "this has not special
301 * permission function, use the fast case".
302 */
303static inline int do_inode_permission(struct inode *inode, int mask)
304{
305 if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
306 if (likely(inode->i_op->permission))
307 return inode->i_op->permission(inode, mask);
308
309 /* This gets set once for the inode lifetime */
310 spin_lock(&inode->i_lock);
311 inode->i_opflags |= IOP_FASTPERM;
312 spin_unlock(&inode->i_lock);
313 }
314 return generic_permission(inode, mask);
315}
316
317/**
318 * inode_permission - check for access rights to a given inode
319 * @inode: inode to check permission on
320 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
321 *
322 * Used to check for read/write/execute permissions on an inode.
323 * We use "fsuid" for this, letting us set arbitrary permissions
324 * for filesystem access without changing the "normal" uids which
325 * are used for other things.
326 *
327 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
328 */
329int inode_permission(struct inode *inode, int mask)
330{
331 int retval;
332
333 if (unlikely(mask & MAY_WRITE)) {
334 umode_t mode = inode->i_mode;
335
336 /*
337 * Nobody gets write access to a read-only fs.
338 */
339 if (IS_RDONLY(inode) &&
340 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
341 return -EROFS;
342
343 /*
344 * Nobody gets write access to an immutable file.
345 */
346 if (IS_IMMUTABLE(inode))
347 return -EACCES;
348 }
349
350 retval = do_inode_permission(inode, mask);
351 if (retval)
352 return retval;
353
354 retval = devcgroup_inode_permission(inode, mask);
355 if (retval)
356 return retval;
357
358 return security_inode_permission(inode, mask);
359}
360
361/**
362 * path_get - get a reference to a path
363 * @path: path to get the reference to
364 *
365 * Given a path increment the reference count to the dentry and the vfsmount.
366 */
367void path_get(struct path *path)
368{
369 mntget(path->mnt);
370 dget(path->dentry);
371}
372EXPORT_SYMBOL(path_get);
373
374/**
375 * path_put - put a reference to a path
376 * @path: path to put the reference to
377 *
378 * Given a path decrement the reference count to the dentry and the vfsmount.
379 */
380void path_put(struct path *path)
381{
382 dput(path->dentry);
383 mntput(path->mnt);
384}
385EXPORT_SYMBOL(path_put);
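/*
 * Minimal usage sketch (illustrative, not part of the original source):
 * whoever stores its own copy of a struct path takes a reference with
 * path_get() and drops it with path_put() when done.
 *
 *	struct path copy = nd->path;
 *	path_get(&copy);	// copy now pins the dentry and the vfsmount
 *	// ... use copy, possibly long after nd is gone ...
 *	path_put(&copy);	// balance the reference taken above
 */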
386
387/*
388 * Path walking has 2 modes, rcu-walk and ref-walk (see
389 * Documentation/filesystems/path-lookup.txt). In situations when we can't
390 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
391 * normal reference counts on dentries and vfsmounts to transition to rcu-walk
392 * mode. Refcounts are grabbed at the last known good point before rcu-walk
393 * got stuck, so ref-walk may continue from there. If this is not successful
394 * (eg. a seqcount has changed), then failure is returned and it's up to caller
395 * to restart the path walk from the beginning in ref-walk mode.
396 */
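/*
 * Illustrative sketch of the retry pattern described above (compare
 * do_path_lookup() later in this file; sketch only, not part of the original
 * source): try rcu-walk first, and fall back to ref-walk when -ECHILD
 * reports that the lazy walk could not be completed.
 *
 *	err = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
 *	if (err == -ECHILD)
 *		err = path_lookupat(dfd, name, flags, nd);
 */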
397
398/**
399 * unlazy_walk - try to switch to ref-walk mode.
400 * @nd: nameidata pathwalk data
401 * @dentry: child of nd->path.dentry or NULL
402 * Returns: 0 on success, -ECHILD on failure
403 *
404 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
405 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
406 * @nd or NULL. Must be called from rcu-walk context.
407 */
408static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
409{
410 struct fs_struct *fs = current->fs;
411 struct dentry *parent = nd->path.dentry;
412 int want_root = 0;
413
414 BUG_ON(!(nd->flags & LOOKUP_RCU));
415 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
416 want_root = 1;
417 spin_lock(&fs->lock);
418 if (nd->root.mnt != fs->root.mnt ||
419 nd->root.dentry != fs->root.dentry)
420 goto err_root;
421 }
422 spin_lock(&parent->d_lock);
423 if (!dentry) {
424 if (!__d_rcu_to_refcount(parent, nd->seq))
425 goto err_parent;
426 BUG_ON(nd->inode != parent->d_inode);
427 } else {
428 if (dentry->d_parent != parent)
429 goto err_parent;
430 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
431 if (!__d_rcu_to_refcount(dentry, nd->seq))
432 goto err_child;
433 /*
434 * If the sequence check on the child dentry passed, then
435 * the child has not been removed from its parent. This
436 * means the parent dentry must be valid and able to take
437 * a reference at this point.
438 */
439 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
440 BUG_ON(!parent->d_count);
441 parent->d_count++;
442 spin_unlock(&dentry->d_lock);
443 }
444 spin_unlock(&parent->d_lock);
445 if (want_root) {
446 path_get(&nd->root);
447 spin_unlock(&fs->lock);
448 }
449 mntget(nd->path.mnt);
450
451 rcu_read_unlock();
452 br_read_unlock(&vfsmount_lock);
453 nd->flags &= ~LOOKUP_RCU;
454 return 0;
455
456err_child:
457 spin_unlock(&dentry->d_lock);
458err_parent:
459 spin_unlock(&parent->d_lock);
460err_root:
461 if (want_root)
462 spin_unlock(&fs->lock);
463 return -ECHILD;
464}
465
466/**
467 * release_open_intent - free up open intent resources
468 * @nd: pointer to nameidata
469 */
470void release_open_intent(struct nameidata *nd)
471{
472 struct file *file = nd->intent.open.file;
473
474 if (file && !IS_ERR(file)) {
475 if (file->f_path.dentry == NULL)
476 put_filp(file);
477 else
478 fput(file);
479 }
480}
481
482static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
483{
484 return dentry->d_op->d_revalidate(dentry, nd);
485}
486
487/**
488 * complete_walk - successful completion of path walk
 489 * @nd: pointer to nameidata
490 *
491 * If we had been in RCU mode, drop out of it and legitimize nd->path.
492 * Revalidate the final result, unless we'd already done that during
493 * the path walk or the filesystem doesn't ask for it. Return 0 on
494 * success, -error on failure. In case of failure caller does not
495 * need to drop nd->path.
496 */
497static int complete_walk(struct nameidata *nd)
498{
499 struct dentry *dentry = nd->path.dentry;
500 int status;
501
502 if (nd->flags & LOOKUP_RCU) {
503 nd->flags &= ~LOOKUP_RCU;
504 if (!(nd->flags & LOOKUP_ROOT))
505 nd->root.mnt = NULL;
506 spin_lock(&dentry->d_lock);
507 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
508 spin_unlock(&dentry->d_lock);
509 rcu_read_unlock();
510 br_read_unlock(&vfsmount_lock);
511 return -ECHILD;
512 }
513 BUG_ON(nd->inode != dentry->d_inode);
514 spin_unlock(&dentry->d_lock);
515 mntget(nd->path.mnt);
516 rcu_read_unlock();
517 br_read_unlock(&vfsmount_lock);
518 }
519
520 if (likely(!(nd->flags & LOOKUP_JUMPED)))
521 return 0;
522
523 if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
524 return 0;
525
526 if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
527 return 0;
528
529 /* Note: we do not d_invalidate() */
530 status = d_revalidate(dentry, nd);
531 if (status > 0)
532 return 0;
533
534 if (!status)
535 status = -ESTALE;
536
537 path_put(&nd->path);
538 return status;
539}
540
541static __always_inline void set_root(struct nameidata *nd)
542{
543 if (!nd->root.mnt)
544 get_fs_root(current->fs, &nd->root);
545}
546
547static int link_path_walk(const char *, struct nameidata *);
548
549static __always_inline void set_root_rcu(struct nameidata *nd)
550{
551 if (!nd->root.mnt) {
552 struct fs_struct *fs = current->fs;
553 unsigned seq;
554
555 do {
556 seq = read_seqcount_begin(&fs->seq);
557 nd->root = fs->root;
558 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
559 } while (read_seqcount_retry(&fs->seq, seq));
560 }
561}
562
563static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
564{
565 int ret;
566
567 if (IS_ERR(link))
568 goto fail;
569
570 if (*link == '/') {
571 set_root(nd);
572 path_put(&nd->path);
573 nd->path = nd->root;
574 path_get(&nd->root);
575 nd->flags |= LOOKUP_JUMPED;
576 }
577 nd->inode = nd->path.dentry->d_inode;
578
579 ret = link_path_walk(link, nd);
580 return ret;
581fail:
582 path_put(&nd->path);
583 return PTR_ERR(link);
584}
585
586static void path_put_conditional(struct path *path, struct nameidata *nd)
587{
588 dput(path->dentry);
589 if (path->mnt != nd->path.mnt)
590 mntput(path->mnt);
591}
592
593static inline void path_to_nameidata(const struct path *path,
594 struct nameidata *nd)
595{
596 if (!(nd->flags & LOOKUP_RCU)) {
597 dput(nd->path.dentry);
598 if (nd->path.mnt != path->mnt)
599 mntput(nd->path.mnt);
600 }
601 nd->path.mnt = path->mnt;
602 nd->path.dentry = path->dentry;
603}
604
605static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
606{
607 struct inode *inode = link->dentry->d_inode;
608 if (inode->i_op->put_link)
609 inode->i_op->put_link(link->dentry, nd, cookie);
610 path_put(link);
611}
612
613static __always_inline int
614follow_link(struct path *link, struct nameidata *nd, void **p)
615{
616 struct dentry *dentry = link->dentry;
617 int error;
618 char *s;
619
620 BUG_ON(nd->flags & LOOKUP_RCU);
621
622 if (link->mnt == nd->path.mnt)
623 mntget(link->mnt);
624
625 error = -ELOOP;
626 if (unlikely(current->total_link_count >= 40))
627 goto out_put_nd_path;
628
629 cond_resched();
630 current->total_link_count++;
631
632 touch_atime(link);
633 nd_set_link(nd, NULL);
634
635 error = security_inode_follow_link(link->dentry, nd);
636 if (error)
637 goto out_put_nd_path;
638
639 nd->last_type = LAST_BIND;
640 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
641 error = PTR_ERR(*p);
642 if (IS_ERR(*p))
643 goto out_put_link;
644
645 error = 0;
646 s = nd_get_link(nd);
647 if (s) {
648 error = __vfs_follow_link(nd, s);
649 } else if (nd->last_type == LAST_BIND) {
650 nd->flags |= LOOKUP_JUMPED;
651 nd->inode = nd->path.dentry->d_inode;
652 if (nd->inode->i_op->follow_link) {
653 /* stepped on a _really_ weird one */
654 path_put(&nd->path);
655 error = -ELOOP;
656 }
657 }
658 if (unlikely(error))
659 put_link(nd, link, *p);
660
661 return error;
662
663out_put_nd_path:
664 path_put(&nd->path);
665out_put_link:
666 path_put(link);
667 return error;
668}
669
670static int follow_up_rcu(struct path *path)
671{
672 struct mount *mnt = real_mount(path->mnt);
673 struct mount *parent;
674 struct dentry *mountpoint;
675
676 parent = mnt->mnt_parent;
677 if (&parent->mnt == path->mnt)
678 return 0;
679 mountpoint = mnt->mnt_mountpoint;
680 path->dentry = mountpoint;
681 path->mnt = &parent->mnt;
682 return 1;
683}
684
685int follow_up(struct path *path)
686{
687 struct mount *mnt = real_mount(path->mnt);
688 struct mount *parent;
689 struct dentry *mountpoint;
690
691 br_read_lock(&vfsmount_lock);
692 parent = mnt->mnt_parent;
693 if (&parent->mnt == path->mnt) {
694 br_read_unlock(&vfsmount_lock);
695 return 0;
696 }
697 mntget(&parent->mnt);
698 mountpoint = dget(mnt->mnt_mountpoint);
699 br_read_unlock(&vfsmount_lock);
700 dput(path->dentry);
701 path->dentry = mountpoint;
702 mntput(path->mnt);
703 path->mnt = &parent->mnt;
704 return 1;
705}
706
707/*
708 * Perform an automount
709 * - return -EISDIR to tell follow_managed() to stop and return the path we
710 * were called with.
711 */
712static int follow_automount(struct path *path, unsigned flags,
713 bool *need_mntput)
714{
715 struct vfsmount *mnt;
716 int err;
717
718 if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
719 return -EREMOTE;
720
721 /* We don't want to mount if someone's just doing a stat -
722 * unless they're stat'ing a directory and appended a '/' to
723 * the name.
724 *
725 * We do, however, want to mount if someone wants to open or
726 * create a file of any type under the mountpoint, wants to
727 * traverse through the mountpoint or wants to open the
728 * mounted directory. Also, autofs may mark negative dentries
729 * as being automount points. These will need the attentions
730 * of the daemon to instantiate them before they can be used.
731 */
732 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
733 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
734 path->dentry->d_inode)
735 return -EISDIR;
736
737 current->total_link_count++;
738 if (current->total_link_count >= 40)
739 return -ELOOP;
740
741 mnt = path->dentry->d_op->d_automount(path);
742 if (IS_ERR(mnt)) {
743 /*
744 * The filesystem is allowed to return -EISDIR here to indicate
745 * it doesn't want to automount. For instance, autofs would do
746 * this so that its userspace daemon can mount on this dentry.
747 *
748 * However, we can only permit this if it's a terminal point in
749 * the path being looked up; if it wasn't then the remainder of
750 * the path is inaccessible and we should say so.
751 */
752 if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
753 return -EREMOTE;
754 return PTR_ERR(mnt);
755 }
756
757 if (!mnt) /* mount collision */
758 return 0;
759
760 if (!*need_mntput) {
761 /* lock_mount() may release path->mnt on error */
762 mntget(path->mnt);
763 *need_mntput = true;
764 }
765 err = finish_automount(mnt, path);
766
767 switch (err) {
768 case -EBUSY:
769 /* Someone else made a mount here whilst we were busy */
770 return 0;
771 case 0:
772 path_put(path);
773 path->mnt = mnt;
774 path->dentry = dget(mnt->mnt_root);
775 return 0;
776 default:
777 return err;
778 }
779
780}
781
782/*
783 * Handle a dentry that is managed in some way.
784 * - Flagged for transit management (autofs)
785 * - Flagged as mountpoint
786 * - Flagged as automount point
787 *
788 * This may only be called in refwalk mode.
789 *
790 * Serialization is taken care of in namespace.c
791 */
792static int follow_managed(struct path *path, unsigned flags)
793{
794 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
795 unsigned managed;
796 bool need_mntput = false;
797 int ret = 0;
798
799 /* Given that we're not holding a lock here, we retain the value in a
800 * local variable for each dentry as we look at it so that we don't see
801 * the components of that value change under us */
802 while (managed = ACCESS_ONCE(path->dentry->d_flags),
803 managed &= DCACHE_MANAGED_DENTRY,
804 unlikely(managed != 0)) {
805 /* Allow the filesystem to manage the transit without i_mutex
806 * being held. */
807 if (managed & DCACHE_MANAGE_TRANSIT) {
808 BUG_ON(!path->dentry->d_op);
809 BUG_ON(!path->dentry->d_op->d_manage);
810 ret = path->dentry->d_op->d_manage(path->dentry, false);
811 if (ret < 0)
812 break;
813 }
814
815 /* Transit to a mounted filesystem. */
816 if (managed & DCACHE_MOUNTED) {
817 struct vfsmount *mounted = lookup_mnt(path);
818 if (mounted) {
819 dput(path->dentry);
820 if (need_mntput)
821 mntput(path->mnt);
822 path->mnt = mounted;
823 path->dentry = dget(mounted->mnt_root);
824 need_mntput = true;
825 continue;
826 }
827
828 /* Something is mounted on this dentry in another
829 * namespace and/or whatever was mounted there in this
830 * namespace got unmounted before we managed to get the
831 * vfsmount_lock */
832 }
833
834 /* Handle an automount point */
835 if (managed & DCACHE_NEED_AUTOMOUNT) {
836 ret = follow_automount(path, flags, &need_mntput);
837 if (ret < 0)
838 break;
839 continue;
840 }
841
842 /* We didn't change the current path point */
843 break;
844 }
845
846 if (need_mntput && path->mnt == mnt)
847 mntput(path->mnt);
848 if (ret == -EISDIR)
849 ret = 0;
850 return ret < 0 ? ret : need_mntput;
851}
852
853int follow_down_one(struct path *path)
854{
855 struct vfsmount *mounted;
856
857 mounted = lookup_mnt(path);
858 if (mounted) {
859 dput(path->dentry);
860 mntput(path->mnt);
861 path->mnt = mounted;
862 path->dentry = dget(mounted->mnt_root);
863 return 1;
864 }
865 return 0;
866}
867
868static inline bool managed_dentry_might_block(struct dentry *dentry)
869{
870 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
871 dentry->d_op->d_manage(dentry, true) < 0);
872}
873
874/*
875 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
876 * we meet a managed dentry that would need blocking.
877 */
878static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
879 struct inode **inode)
880{
881 for (;;) {
882 struct mount *mounted;
883 /*
884 * Don't forget we might have a non-mountpoint managed dentry
885 * that wants to block transit.
886 */
887 if (unlikely(managed_dentry_might_block(path->dentry)))
888 return false;
889
890 if (!d_mountpoint(path->dentry))
891 break;
892
893 mounted = __lookup_mnt(path->mnt, path->dentry, 1);
894 if (!mounted)
895 break;
896 path->mnt = &mounted->mnt;
897 path->dentry = mounted->mnt.mnt_root;
898 nd->flags |= LOOKUP_JUMPED;
899 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
900 /*
901 * Update the inode too. We don't need to re-check the
902 * dentry sequence number here after this d_inode read,
903 * because a mount-point is always pinned.
904 */
905 *inode = path->dentry->d_inode;
906 }
907 return true;
908}
909
910static void follow_mount_rcu(struct nameidata *nd)
911{
912 while (d_mountpoint(nd->path.dentry)) {
913 struct mount *mounted;
914 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
915 if (!mounted)
916 break;
917 nd->path.mnt = &mounted->mnt;
918 nd->path.dentry = mounted->mnt.mnt_root;
919 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
920 }
921}
922
923static int follow_dotdot_rcu(struct nameidata *nd)
924{
925 set_root_rcu(nd);
926
927 while (1) {
928 if (nd->path.dentry == nd->root.dentry &&
929 nd->path.mnt == nd->root.mnt) {
930 break;
931 }
932 if (nd->path.dentry != nd->path.mnt->mnt_root) {
933 struct dentry *old = nd->path.dentry;
934 struct dentry *parent = old->d_parent;
935 unsigned seq;
936
937 seq = read_seqcount_begin(&parent->d_seq);
938 if (read_seqcount_retry(&old->d_seq, nd->seq))
939 goto failed;
940 nd->path.dentry = parent;
941 nd->seq = seq;
942 break;
943 }
944 if (!follow_up_rcu(&nd->path))
945 break;
946 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
947 }
948 follow_mount_rcu(nd);
949 nd->inode = nd->path.dentry->d_inode;
950 return 0;
951
952failed:
953 nd->flags &= ~LOOKUP_RCU;
954 if (!(nd->flags & LOOKUP_ROOT))
955 nd->root.mnt = NULL;
956 rcu_read_unlock();
957 br_read_unlock(&vfsmount_lock);
958 return -ECHILD;
959}
960
961/*
962 * Follow down to the covering mount currently visible to userspace. At each
963 * point, the filesystem owning that dentry may be queried as to whether the
964 * caller is permitted to proceed or not.
965 */
966int follow_down(struct path *path)
967{
968 unsigned managed;
969 int ret;
970
971 while (managed = ACCESS_ONCE(path->dentry->d_flags),
972 unlikely(managed & DCACHE_MANAGED_DENTRY)) {
973 /* Allow the filesystem to manage the transit without i_mutex
974 * being held.
975 *
976 * We indicate to the filesystem if someone is trying to mount
977 * something here. This gives autofs the chance to deny anyone
978 * other than its daemon the right to mount on its
979 * superstructure.
980 *
981 * The filesystem may sleep at this point.
982 */
983 if (managed & DCACHE_MANAGE_TRANSIT) {
984 BUG_ON(!path->dentry->d_op);
985 BUG_ON(!path->dentry->d_op->d_manage);
986 ret = path->dentry->d_op->d_manage(
987 path->dentry, false);
988 if (ret < 0)
989 return ret == -EISDIR ? 0 : ret;
990 }
991
992 /* Transit to a mounted filesystem. */
993 if (managed & DCACHE_MOUNTED) {
994 struct vfsmount *mounted = lookup_mnt(path);
995 if (!mounted)
996 break;
997 dput(path->dentry);
998 mntput(path->mnt);
999 path->mnt = mounted;
1000 path->dentry = dget(mounted->mnt_root);
1001 continue;
1002 }
1003
1004 /* Don't handle automount points here */
1005 break;
1006 }
1007 return 0;
1008}
1009
1010/*
1011 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
1012 */
1013static void follow_mount(struct path *path)
1014{
1015 while (d_mountpoint(path->dentry)) {
1016 struct vfsmount *mounted = lookup_mnt(path);
1017 if (!mounted)
1018 break;
1019 dput(path->dentry);
1020 mntput(path->mnt);
1021 path->mnt = mounted;
1022 path->dentry = dget(mounted->mnt_root);
1023 }
1024}
1025
1026static void follow_dotdot(struct nameidata *nd)
1027{
1028 set_root(nd);
1029
1030 while(1) {
1031 struct dentry *old = nd->path.dentry;
1032
1033 if (nd->path.dentry == nd->root.dentry &&
1034 nd->path.mnt == nd->root.mnt) {
1035 break;
1036 }
1037 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1038 /* rare case of legitimate dget_parent()... */
1039 nd->path.dentry = dget_parent(nd->path.dentry);
1040 dput(old);
1041 break;
1042 }
1043 if (!follow_up(&nd->path))
1044 break;
1045 }
1046 follow_mount(&nd->path);
1047 nd->inode = nd->path.dentry->d_inode;
1048}
1049
1050/*
1051 * This looks up the name in dcache, possibly revalidates the old dentry and
 1052 * allocates a new one if not found or not valid. The need_lookup argument
 1053 * returns whether a call to i_op->lookup is still necessary.
1054 *
1055 * dir->d_inode->i_mutex must be held
1056 */
1057static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
1058 struct nameidata *nd, bool *need_lookup)
1059{
1060 struct dentry *dentry;
1061 int error;
1062
1063 *need_lookup = false;
1064 dentry = d_lookup(dir, name);
1065 if (dentry) {
1066 if (d_need_lookup(dentry)) {
1067 *need_lookup = true;
1068 } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
1069 error = d_revalidate(dentry, nd);
1070 if (unlikely(error <= 0)) {
1071 if (error < 0) {
1072 dput(dentry);
1073 return ERR_PTR(error);
1074 } else if (!d_invalidate(dentry)) {
1075 dput(dentry);
1076 dentry = NULL;
1077 }
1078 }
1079 }
1080 }
1081
1082 if (!dentry) {
1083 dentry = d_alloc(dir, name);
1084 if (unlikely(!dentry))
1085 return ERR_PTR(-ENOMEM);
1086
1087 *need_lookup = true;
1088 }
1089 return dentry;
1090}
1091
1092/*
1093 * Call i_op->lookup on the dentry. The dentry must be negative but may be
 1094 * hashed if it was populated with DCACHE_NEED_LOOKUP.
1095 *
1096 * dir->d_inode->i_mutex must be held
1097 */
1098static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1099 struct nameidata *nd)
1100{
1101 struct dentry *old;
1102
1103 /* Don't create child dentry for a dead directory. */
1104 if (unlikely(IS_DEADDIR(dir))) {
1105 dput(dentry);
1106 return ERR_PTR(-ENOENT);
1107 }
1108
1109 old = dir->i_op->lookup(dir, dentry, nd);
1110 if (unlikely(old)) {
1111 dput(dentry);
1112 dentry = old;
1113 }
1114 return dentry;
1115}
1116
1117static struct dentry *__lookup_hash(struct qstr *name,
1118 struct dentry *base, struct nameidata *nd)
1119{
1120 bool need_lookup;
1121 struct dentry *dentry;
1122
1123 dentry = lookup_dcache(name, base, nd, &need_lookup);
1124 if (!need_lookup)
1125 return dentry;
1126
1127 return lookup_real(base->d_inode, dentry, nd);
1128}
1129
1130/*
1131 * It's more convoluted than I'd like it to be, but... it's still fairly
 1132 * small and for now I'd prefer to keep the fast path as straight as possible.
1133 * It _is_ time-critical.
1134 */
1135static int lookup_fast(struct nameidata *nd, struct qstr *name,
1136 struct path *path, struct inode **inode)
1137{
1138 struct vfsmount *mnt = nd->path.mnt;
1139 struct dentry *dentry, *parent = nd->path.dentry;
1140 int need_reval = 1;
1141 int status = 1;
1142 int err;
1143
1144 /*
 1145 * Rename seqlock is not required here because on the off chance
1146 * of a false negative due to a concurrent rename, we're going to
1147 * do the non-racy lookup, below.
1148 */
1149 if (nd->flags & LOOKUP_RCU) {
1150 unsigned seq;
1151 dentry = __d_lookup_rcu(parent, name, &seq, nd->inode);
1152 if (!dentry)
1153 goto unlazy;
1154
1155 /*
1156 * This sequence count validates that the inode matches
1157 * the dentry name information from lookup.
1158 */
1159 *inode = dentry->d_inode;
1160 if (read_seqcount_retry(&dentry->d_seq, seq))
1161 return -ECHILD;
1162
1163 /*
1164 * This sequence count validates that the parent had no
1165 * changes while we did the lookup of the dentry above.
1166 *
1167 * The memory barrier in read_seqcount_begin of child is
1168 * enough, we can use __read_seqcount_retry here.
1169 */
1170 if (__read_seqcount_retry(&parent->d_seq, nd->seq))
1171 return -ECHILD;
1172 nd->seq = seq;
1173
1174 if (unlikely(d_need_lookup(dentry)))
1175 goto unlazy;
1176 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1177 status = d_revalidate(dentry, nd);
1178 if (unlikely(status <= 0)) {
1179 if (status != -ECHILD)
1180 need_reval = 0;
1181 goto unlazy;
1182 }
1183 }
1184 path->mnt = mnt;
1185 path->dentry = dentry;
1186 if (unlikely(!__follow_mount_rcu(nd, path, inode)))
1187 goto unlazy;
1188 if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
1189 goto unlazy;
1190 return 0;
1191unlazy:
1192 if (unlazy_walk(nd, dentry))
1193 return -ECHILD;
1194 } else {
1195 dentry = __d_lookup(parent, name);
1196 }
1197
1198 if (unlikely(!dentry))
1199 goto need_lookup;
1200
1201 if (unlikely(d_need_lookup(dentry))) {
1202 dput(dentry);
1203 goto need_lookup;
1204 }
1205
1206 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
1207 status = d_revalidate(dentry, nd);
1208 if (unlikely(status <= 0)) {
1209 if (status < 0) {
1210 dput(dentry);
1211 return status;
1212 }
1213 if (!d_invalidate(dentry)) {
1214 dput(dentry);
1215 goto need_lookup;
1216 }
1217 }
1218
1219 path->mnt = mnt;
1220 path->dentry = dentry;
1221 err = follow_managed(path, nd->flags);
1222 if (unlikely(err < 0)) {
1223 path_put_conditional(path, nd);
1224 return err;
1225 }
1226 if (err)
1227 nd->flags |= LOOKUP_JUMPED;
1228 *inode = path->dentry->d_inode;
1229 return 0;
1230
1231need_lookup:
1232 return 1;
1233}
1234
1235/* Fast lookup failed, do it the slow way */
1236static int lookup_slow(struct nameidata *nd, struct qstr *name,
1237 struct path *path)
1238{
1239 struct dentry *dentry, *parent;
1240 int err;
1241
1242 parent = nd->path.dentry;
1243 BUG_ON(nd->inode != parent->d_inode);
1244
1245 mutex_lock(&parent->d_inode->i_mutex);
1246 dentry = __lookup_hash(name, parent, nd);
1247 mutex_unlock(&parent->d_inode->i_mutex);
1248 if (IS_ERR(dentry))
1249 return PTR_ERR(dentry);
1250 path->mnt = nd->path.mnt;
1251 path->dentry = dentry;
1252 err = follow_managed(path, nd->flags);
1253 if (unlikely(err < 0)) {
1254 path_put_conditional(path, nd);
1255 return err;
1256 }
1257 if (err)
1258 nd->flags |= LOOKUP_JUMPED;
1259 return 0;
1260}
1261
1262static inline int may_lookup(struct nameidata *nd)
1263{
1264 if (nd->flags & LOOKUP_RCU) {
1265 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1266 if (err != -ECHILD)
1267 return err;
1268 if (unlazy_walk(nd, NULL))
1269 return -ECHILD;
1270 }
1271 return inode_permission(nd->inode, MAY_EXEC);
1272}
1273
1274static inline int handle_dots(struct nameidata *nd, int type)
1275{
1276 if (type == LAST_DOTDOT) {
1277 if (nd->flags & LOOKUP_RCU) {
1278 if (follow_dotdot_rcu(nd))
1279 return -ECHILD;
1280 } else
1281 follow_dotdot(nd);
1282 }
1283 return 0;
1284}
1285
1286static void terminate_walk(struct nameidata *nd)
1287{
1288 if (!(nd->flags & LOOKUP_RCU)) {
1289 path_put(&nd->path);
1290 } else {
1291 nd->flags &= ~LOOKUP_RCU;
1292 if (!(nd->flags & LOOKUP_ROOT))
1293 nd->root.mnt = NULL;
1294 rcu_read_unlock();
1295 br_read_unlock(&vfsmount_lock);
1296 }
1297}
1298
1299/*
1300 * Do we need to follow links? We _really_ want to be able
1301 * to do this check without having to look at inode->i_op,
1302 * so we keep a cache of "no, this doesn't need follow_link"
1303 * for the common case.
1304 */
1305static inline int should_follow_link(struct inode *inode, int follow)
1306{
1307 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1308 if (likely(inode->i_op->follow_link))
1309 return follow;
1310
1311 /* This gets set once for the inode lifetime */
1312 spin_lock(&inode->i_lock);
1313 inode->i_opflags |= IOP_NOFOLLOW;
1314 spin_unlock(&inode->i_lock);
1315 }
1316 return 0;
1317}
1318
1319static inline int walk_component(struct nameidata *nd, struct path *path,
1320 struct qstr *name, int type, int follow)
1321{
1322 struct inode *inode;
1323 int err;
1324 /*
1325 * "." and ".." are special - ".." especially so because it has
1326 * to be able to know about the current root directory and
1327 * parent relationships.
1328 */
1329 if (unlikely(type != LAST_NORM))
1330 return handle_dots(nd, type);
1331 err = lookup_fast(nd, name, path, &inode);
1332 if (unlikely(err)) {
1333 if (err < 0)
1334 goto out_err;
1335
1336 err = lookup_slow(nd, name, path);
1337 if (err < 0)
1338 goto out_err;
1339
1340 inode = path->dentry->d_inode;
1341 }
1342 err = -ENOENT;
1343 if (!inode)
1344 goto out_path_put;
1345
1346 if (should_follow_link(inode, follow)) {
1347 if (nd->flags & LOOKUP_RCU) {
1348 if (unlikely(unlazy_walk(nd, path->dentry))) {
1349 err = -ECHILD;
1350 goto out_err;
1351 }
1352 }
1353 BUG_ON(inode != path->dentry->d_inode);
1354 return 1;
1355 }
1356 path_to_nameidata(path, nd);
1357 nd->inode = inode;
1358 return 0;
1359
1360out_path_put:
1361 path_to_nameidata(path, nd);
1362out_err:
1363 terminate_walk(nd);
1364 return err;
1365}
1366
1367/*
1368 * This limits recursive symlink follows to 8, while
1369 * limiting consecutive symlinks to 40.
1370 *
1371 * Without that kind of total limit, nasty chains of consecutive
1372 * symlinks can cause almost arbitrarily long lookups.
1373 */
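/*
 * Illustrative consequence of the two limits above (not part of the original
 * source): a chain of more than 40 consecutive symlinks, e.g.
 *
 *	a -> b -> c -> ...
 *
 * fails with -ELOOP once current->total_link_count reaches 40 in
 * follow_link(), while components whose targets are themselves symlinks
 * nested more than MAX_NESTED_LINKS (8) levels deep trip the
 * current->link_count check below.
 */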
1374static inline int nested_symlink(struct path *path, struct nameidata *nd)
1375{
1376 int res;
1377
1378 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
1379 path_put_conditional(path, nd);
1380 path_put(&nd->path);
1381 return -ELOOP;
1382 }
1383 BUG_ON(nd->depth >= MAX_NESTED_LINKS);
1384
1385 nd->depth++;
1386 current->link_count++;
1387
1388 do {
1389 struct path link = *path;
1390 void *cookie;
1391
1392 res = follow_link(&link, nd, &cookie);
1393 if (res)
1394 break;
1395 res = walk_component(nd, path, &nd->last,
1396 nd->last_type, LOOKUP_FOLLOW);
1397 put_link(nd, &link, cookie);
1398 } while (res > 0);
1399
1400 current->link_count--;
1401 nd->depth--;
1402 return res;
1403}
1404
1405/*
1406 * We really don't want to look at inode->i_op->lookup
1407 * when we don't have to. So we keep a cache bit in
1408 * the inode ->i_opflags field that says "yes, we can
1409 * do lookup on this inode".
1410 */
1411static inline int can_lookup(struct inode *inode)
1412{
1413 if (likely(inode->i_opflags & IOP_LOOKUP))
1414 return 1;
1415 if (likely(!inode->i_op->lookup))
1416 return 0;
1417
1418 /* We do this once for the lifetime of the inode */
1419 spin_lock(&inode->i_lock);
1420 inode->i_opflags |= IOP_LOOKUP;
1421 spin_unlock(&inode->i_lock);
1422 return 1;
1423}
1424
1425/*
1426 * We can do the critical dentry name comparison and hashing
1427 * operations one word at a time, but we are limited to:
1428 *
1429 * - Architectures with fast unaligned word accesses. We could
1430 * do a "get_unaligned()" if this helps and is sufficiently
1431 * fast.
1432 *
1433 * - Little-endian machines (so that we can generate the mask
1434 * of low bytes efficiently). Again, we *could* do a byte
1435 * swapping load on big-endian architectures if that is not
1436 * expensive enough to make the optimization worthless.
1437 *
1438 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
1439 * do not trap on the (extremely unlikely) case of a page
 1440 * crossing operation).
1441 *
1442 * - Furthermore, we need an efficient 64-bit compile for the
1443 * 64-bit case in order to generate the "number of bytes in
 1444 * the final mask". Again, that could be replaced with an
1445 * efficient population count instruction or similar.
1446 */
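/*
 * Worked example for the tail masking used in full_name_hash() below
 * (illustrative, not part of the original source): on a 64-bit machine with
 * a 3-byte tail,
 *
 *	len  = 3;
 *	mask = ~(~0ul << len*8);	// == 0x0000000000ffffff
 *	hash += mask & a;		// only the 3 remaining name bytes are
 *					// mixed in; whatever was read past the
 *					// end of the component is masked off
 */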
1447#ifdef CONFIG_DCACHE_WORD_ACCESS
1448
1449#include <asm/word-at-a-time.h>
1450
1451#ifdef CONFIG_64BIT
1452
1453static inline unsigned int fold_hash(unsigned long hash)
1454{
1455 hash += hash >> (8*sizeof(int));
1456 return hash;
1457}
1458
1459#else /* 32-bit case */
1460
1461#define fold_hash(x) (x)
1462
1463#endif
1464
1465unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1466{
1467 unsigned long a, mask;
1468 unsigned long hash = 0;
1469
1470 for (;;) {
1471 a = load_unaligned_zeropad(name);
1472 if (len < sizeof(unsigned long))
1473 break;
1474 hash += a;
1475 hash *= 9;
1476 name += sizeof(unsigned long);
1477 len -= sizeof(unsigned long);
1478 if (!len)
1479 goto done;
1480 }
1481 mask = ~(~0ul << len*8);
1482 hash += mask & a;
1483done:
1484 return fold_hash(hash);
1485}
1486EXPORT_SYMBOL(full_name_hash);
1487
1488/*
1489 * Calculate the length and hash of the path component, and
 1490 * return the length of the component.
1491 */
1492static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1493{
1494 unsigned long a, b, adata, bdata, mask, hash, len;
1495 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1496
1497 hash = a = 0;
1498 len = -sizeof(unsigned long);
1499 do {
1500 hash = (hash + a) * 9;
1501 len += sizeof(unsigned long);
1502 a = load_unaligned_zeropad(name+len);
1503 b = a ^ REPEAT_BYTE('/');
1504 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
1505
1506 adata = prep_zero_mask(a, adata, &constants);
1507 bdata = prep_zero_mask(b, bdata, &constants);
1508
1509 mask = create_zero_mask(adata | bdata);
1510
1511 hash += a & zero_bytemask(mask);
1512 *hashp = fold_hash(hash);
1513
1514 return len + find_zero(mask);
1515}
1516
1517#else
1518
1519unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1520{
1521 unsigned long hash = init_name_hash();
1522 while (len--)
1523 hash = partial_name_hash(*name++, hash);
1524 return end_name_hash(hash);
1525}
1526EXPORT_SYMBOL(full_name_hash);
1527
1528/*
1529 * We know there's a real path component here of at least
1530 * one character.
1531 */
1532static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1533{
1534 unsigned long hash = init_name_hash();
1535 unsigned long len = 0, c;
1536
1537 c = (unsigned char)*name;
1538 do {
1539 len++;
1540 hash = partial_name_hash(c, hash);
1541 c = (unsigned char)name[len];
1542 } while (c && c != '/');
1543 *hashp = end_name_hash(hash);
1544 return len;
1545}
1546
1547#endif
1548
1549/*
1550 * Name resolution.
1551 * This is the basic name resolution function, turning a pathname into
1552 * the final dentry. We expect 'base' to be positive and a directory.
1553 *
1554 * Returns 0 and nd will have valid dentry and mnt on success.
1555 * Returns error and drops reference to input namei data on failure.
1556 */
1557static int link_path_walk(const char *name, struct nameidata *nd)
1558{
1559 struct path next;
1560 int err;
1561
1562 while (*name=='/')
1563 name++;
1564 if (!*name)
1565 return 0;
1566
1567 /* At this point we know we have a real path component. */
1568 for(;;) {
1569 struct qstr this;
1570 long len;
1571 int type;
1572
1573 err = may_lookup(nd);
1574 if (err)
1575 break;
1576
1577 len = hash_name(name, &this.hash);
1578 this.name = name;
1579 this.len = len;
1580
1581 type = LAST_NORM;
1582 if (name[0] == '.') switch (len) {
1583 case 2:
1584 if (name[1] == '.') {
1585 type = LAST_DOTDOT;
1586 nd->flags |= LOOKUP_JUMPED;
1587 }
1588 break;
1589 case 1:
1590 type = LAST_DOT;
1591 }
1592 if (likely(type == LAST_NORM)) {
1593 struct dentry *parent = nd->path.dentry;
1594 nd->flags &= ~LOOKUP_JUMPED;
1595 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
1596 err = parent->d_op->d_hash(parent, nd->inode,
1597 &this);
1598 if (err < 0)
1599 break;
1600 }
1601 }
1602
1603 if (!name[len])
1604 goto last_component;
1605 /*
1606 * If it wasn't NUL, we know it was '/'. Skip that
1607 * slash, and continue until no more slashes.
1608 */
1609 do {
1610 len++;
1611 } while (unlikely(name[len] == '/'));
1612 if (!name[len])
1613 goto last_component;
1614 name += len;
1615
1616 err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
1617 if (err < 0)
1618 return err;
1619
1620 if (err) {
1621 err = nested_symlink(&next, nd);
1622 if (err)
1623 return err;
1624 }
1625 if (can_lookup(nd->inode))
1626 continue;
1627 err = -ENOTDIR;
1628 break;
1629 /* here ends the main loop */
1630
1631last_component:
1632 nd->last = this;
1633 nd->last_type = type;
1634 return 0;
1635 }
1636 terminate_walk(nd);
1637 return err;
1638}
1639
1640static int path_init(int dfd, const char *name, unsigned int flags,
1641 struct nameidata *nd, struct file **fp)
1642{
1643 int retval = 0;
1644 int fput_needed;
1645 struct file *file;
1646
1647 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1648 nd->flags = flags | LOOKUP_JUMPED;
1649 nd->depth = 0;
1650 if (flags & LOOKUP_ROOT) {
1651 struct inode *inode = nd->root.dentry->d_inode;
1652 if (*name) {
1653 if (!inode->i_op->lookup)
1654 return -ENOTDIR;
1655 retval = inode_permission(inode, MAY_EXEC);
1656 if (retval)
1657 return retval;
1658 }
1659 nd->path = nd->root;
1660 nd->inode = inode;
1661 if (flags & LOOKUP_RCU) {
1662 br_read_lock(&vfsmount_lock);
1663 rcu_read_lock();
1664 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1665 } else {
1666 path_get(&nd->path);
1667 }
1668 return 0;
1669 }
1670
1671 nd->root.mnt = NULL;
1672
1673 if (*name=='/') {
1674 if (flags & LOOKUP_RCU) {
1675 br_read_lock(&vfsmount_lock);
1676 rcu_read_lock();
1677 set_root_rcu(nd);
1678 } else {
1679 set_root(nd);
1680 path_get(&nd->root);
1681 }
1682 nd->path = nd->root;
1683 } else if (dfd == AT_FDCWD) {
1684 if (flags & LOOKUP_RCU) {
1685 struct fs_struct *fs = current->fs;
1686 unsigned seq;
1687
1688 br_read_lock(&vfsmount_lock);
1689 rcu_read_lock();
1690
1691 do {
1692 seq = read_seqcount_begin(&fs->seq);
1693 nd->path = fs->pwd;
1694 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1695 } while (read_seqcount_retry(&fs->seq, seq));
1696 } else {
1697 get_fs_pwd(current->fs, &nd->path);
1698 }
1699 } else {
1700 struct dentry *dentry;
1701
1702 file = fget_raw_light(dfd, &fput_needed);
1703 retval = -EBADF;
1704 if (!file)
1705 goto out_fail;
1706
1707 dentry = file->f_path.dentry;
1708
1709 if (*name) {
1710 retval = -ENOTDIR;
1711 if (!S_ISDIR(dentry->d_inode->i_mode))
1712 goto fput_fail;
1713
1714 retval = inode_permission(dentry->d_inode, MAY_EXEC);
1715 if (retval)
1716 goto fput_fail;
1717 }
1718
1719 nd->path = file->f_path;
1720 if (flags & LOOKUP_RCU) {
1721 if (fput_needed)
1722 *fp = file;
1723 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1724 br_read_lock(&vfsmount_lock);
1725 rcu_read_lock();
1726 } else {
1727 path_get(&file->f_path);
1728 fput_light(file, fput_needed);
1729 }
1730 }
1731
1732 nd->inode = nd->path.dentry->d_inode;
1733 return 0;
1734
1735fput_fail:
1736 fput_light(file, fput_needed);
1737out_fail:
1738 return retval;
1739}
1740
1741static inline int lookup_last(struct nameidata *nd, struct path *path)
1742{
1743 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
1744 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
1745
1746 nd->flags &= ~LOOKUP_PARENT;
1747 return walk_component(nd, path, &nd->last, nd->last_type,
1748 nd->flags & LOOKUP_FOLLOW);
1749}
1750
1751/* Returns 0 and nd will be valid on success; returns an error otherwise. */
1752static int path_lookupat(int dfd, const char *name,
1753 unsigned int flags, struct nameidata *nd)
1754{
1755 struct file *base = NULL;
1756 struct path path;
1757 int err;
1758
1759 /*
1760 * Path walking is largely split up into 2 different synchronisation
1761 * schemes, rcu-walk and ref-walk (explained in
1762 * Documentation/filesystems/path-lookup.txt). These share much of the
1763 * path walk code, but some things particularly setup, cleanup, and
1764 * following mounts are sufficiently divergent that functions are
1765 * duplicated. Typically there is a function foo(), and its RCU
1766 * analogue, foo_rcu().
1767 *
1768 * -ECHILD is the error number of choice (just to avoid clashes) that
1769 * is returned if some aspect of an rcu-walk fails. Such an error must
1770 * be handled by restarting a traditional ref-walk (which will always
1771 * be able to complete).
1772 */
1773 err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
1774
1775 if (unlikely(err))
1776 return err;
1777
1778 current->total_link_count = 0;
1779 err = link_path_walk(name, nd);
1780
1781 if (!err && !(flags & LOOKUP_PARENT)) {
1782 err = lookup_last(nd, &path);
1783 while (err > 0) {
1784 void *cookie;
1785 struct path link = path;
1786 nd->flags |= LOOKUP_PARENT;
1787 err = follow_link(&link, nd, &cookie);
1788 if (err)
1789 break;
1790 err = lookup_last(nd, &path);
1791 put_link(nd, &link, cookie);
1792 }
1793 }
1794
1795 if (!err)
1796 err = complete_walk(nd);
1797
1798 if (!err && nd->flags & LOOKUP_DIRECTORY) {
1799 if (!nd->inode->i_op->lookup) {
1800 path_put(&nd->path);
1801 err = -ENOTDIR;
1802 }
1803 }
1804
1805 if (base)
1806 fput(base);
1807
1808 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
1809 path_put(&nd->root);
1810 nd->root.mnt = NULL;
1811 }
1812 return err;
1813}
1814
1815static int do_path_lookup(int dfd, const char *name,
1816 unsigned int flags, struct nameidata *nd)
1817{
1818 int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
1819 if (unlikely(retval == -ECHILD))
1820 retval = path_lookupat(dfd, name, flags, nd);
1821 if (unlikely(retval == -ESTALE))
1822 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
1823
1824 if (likely(!retval)) {
1825 if (unlikely(!audit_dummy_context())) {
1826 if (nd->path.dentry && nd->inode)
1827 audit_inode(name, nd->path.dentry);
1828 }
1829 }
1830 return retval;
1831}
1832
1833int kern_path_parent(const char *name, struct nameidata *nd)
1834{
1835 return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
1836}
1837
1838int kern_path(const char *name, unsigned int flags, struct path *path)
1839{
1840 struct nameidata nd;
1841 int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
1842 if (!res)
1843 *path = nd.path;
1844 return res;
1845}
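/*
 * Minimal usage sketch for kern_path() (illustrative, not part of the
 * original source; the pathname is only an example):
 *
 *	struct path path;
 *	int err = kern_path("/dev/console", LOOKUP_FOLLOW, &path);
 *	if (!err) {
 *		// ... use path.mnt and path.dentry ...
 *		path_put(&path);
 *	}
 */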
1846
1847/**
1848 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
1849 * @dentry: pointer to dentry of the base directory
1850 * @mnt: pointer to vfs mount of the base directory
1851 * @name: pointer to file name
1852 * @flags: lookup flags
1853 * @path: pointer to struct path to fill
1854 */
1855int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
1856 const char *name, unsigned int flags,
1857 struct path *path)
1858{
1859 struct nameidata nd;
1860 int err;
1861 nd.root.dentry = dentry;
1862 nd.root.mnt = mnt;
1863 BUG_ON(flags & LOOKUP_PARENT);
1864 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
1865 err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
1866 if (!err)
1867 *path = nd.path;
1868 return err;
1869}
1870
1871/*
1872 * Restricted form of lookup. Doesn't follow links, single-component only,
1873 * needs parent already locked. Doesn't follow mounts.
1874 * SMP-safe.
1875 */
1876static struct dentry *lookup_hash(struct nameidata *nd)
1877{
1878 return __lookup_hash(&nd->last, nd->path.dentry, nd);
1879}
1880
1881/**
1882 * lookup_one_len - filesystem helper to lookup single pathname component
1883 * @name: pathname component to lookup
1884 * @base: base directory to lookup from
1885 * @len: maximum length @len should be interpreted to
1886 *
1887 * Note that this routine is purely a helper for filesystem usage and should
1888 * not be called by generic code. Also note that by using this function the
1889 * nameidata argument is passed to the filesystem methods and a filesystem
1890 * using this helper needs to be prepared for that.
1891 */
1892struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
1893{
1894 struct qstr this;
1895 unsigned int c;
1896 int err;
1897
1898 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
1899
1900 this.name = name;
1901 this.len = len;
1902 this.hash = full_name_hash(name, len);
1903 if (!len)
1904 return ERR_PTR(-EACCES);
1905
1906 while (len--) {
1907 c = *(const unsigned char *)name++;
1908 if (c == '/' || c == '\0')
1909 return ERR_PTR(-EACCES);
1910 }
1911 /*
1912 * See if the low-level filesystem might want
1913 * to use its own hash..
1914 */
1915 if (base->d_flags & DCACHE_OP_HASH) {
1916 int err = base->d_op->d_hash(base, base->d_inode, &this);
1917 if (err < 0)
1918 return ERR_PTR(err);
1919 }
1920
1921 err = inode_permission(base->d_inode, MAY_EXEC);
1922 if (err)
1923 return ERR_PTR(err);
1924
1925 return __lookup_hash(&this, base, NULL);
1926}
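/*
 * Usage sketch for lookup_one_len() (illustrative, not part of the original
 * source): a filesystem looking up a single child of a directory whose
 * i_mutex it already holds.
 *
 *	mutex_lock(&dir->d_inode->i_mutex);
 *	child = lookup_one_len("child-name", dir, strlen("child-name"));
 *	mutex_unlock(&dir->d_inode->i_mutex);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	// ... use child (it may be a negative dentry), then dput(child) ...
 */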
1927
1928int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
1929 struct path *path, int *empty)
1930{
1931 struct nameidata nd;
1932 char *tmp = getname_flags(name, flags, empty);
1933 int err = PTR_ERR(tmp);
1934 if (!IS_ERR(tmp)) {
1935
1936 BUG_ON(flags & LOOKUP_PARENT);
1937
1938 err = do_path_lookup(dfd, tmp, flags, &nd);
1939 putname(tmp);
1940 if (!err)
1941 *path = nd.path;
1942 }
1943 return err;
1944}
1945
1946int user_path_at(int dfd, const char __user *name, unsigned flags,
1947 struct path *path)
1948{
1949 return user_path_at_empty(dfd, name, flags, path, NULL);
1950}
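/*
 * Typical syscall-side pattern for user_path_at() (illustrative sketch, not
 * part of the original source):
 *
 *	struct path path;
 *	int error = user_path_at(dfd, pathname, LOOKUP_FOLLOW, &path);
 *	if (error)
 *		return error;
 *	// ... operate on path ...
 *	path_put(&path);
 */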
1951
1952static int user_path_parent(int dfd, const char __user *path,
1953 struct nameidata *nd, char **name)
1954{
1955 char *s = getname(path);
1956 int error;
1957
1958 if (IS_ERR(s))
1959 return PTR_ERR(s);
1960
1961 error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
1962 if (error)
1963 putname(s);
1964 else
1965 *name = s;
1966
1967 return error;
1968}
1969
1970/*
1971 * It's inline, so penalty for filesystems that don't use sticky bit is
1972 * minimal.
1973 */
1974static inline int check_sticky(struct inode *dir, struct inode *inode)
1975{
1976 kuid_t fsuid = current_fsuid();
1977
1978 if (!(dir->i_mode & S_ISVTX))
1979 return 0;
1980 if (uid_eq(inode->i_uid, fsuid))
1981 return 0;
1982 if (uid_eq(dir->i_uid, fsuid))
1983 return 0;
1984 return !inode_capable(inode, CAP_FOWNER);
1985}
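/*
 * Worked example for check_sticky() (illustrative, not part of the original
 * source): unlinking somebody else's file in a mode-1777 directory such as
 * /tmp.
 *
 *	dir->i_mode has S_ISVTX set, dir->i_uid == root
 *	inode->i_uid == alice, current_fsuid() == bob, no CAP_FOWNER
 *	-> check_sticky() returns 1, so may_delete() below fails with -EPERM
 *
 * Without the sticky bit, write+exec permission on the directory would have
 * been enough.
 */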
1986
1987/*
1988 * Check whether we can remove a link victim from directory dir, check
1989 * whether the type of victim is right.
1990 * 1. We can't do it if dir is read-only (done in permission())
1991 * 2. We should have write and exec permissions on dir
1992 * 3. We can't remove anything from append-only dir
1993 * 4. We can't do anything with immutable dir (done in permission())
1994 * 5. If the sticky bit on dir is set we should either
1995 * a. be owner of dir, or
1996 * b. be owner of victim, or
1997 * c. have CAP_FOWNER capability
 1998 * 6. If the victim is append-only or immutable we can't do anything with
1999 * links pointing to it.
2000 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
2001 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
2002 * 9. We can't remove a root or mountpoint.
2003 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
2004 * nfs_async_unlink().
2005 */
2006static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
2007{
2008 int error;
2009
2010 if (!victim->d_inode)
2011 return -ENOENT;
2012
2013 BUG_ON(victim->d_parent->d_inode != dir);
2014 audit_inode_child(victim, dir);
2015
2016 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
2017 if (error)
2018 return error;
2019 if (IS_APPEND(dir))
2020 return -EPERM;
2021 if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
2022 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
2023 return -EPERM;
2024 if (isdir) {
2025 if (!S_ISDIR(victim->d_inode->i_mode))
2026 return -ENOTDIR;
2027 if (IS_ROOT(victim))
2028 return -EBUSY;
2029 } else if (S_ISDIR(victim->d_inode->i_mode))
2030 return -EISDIR;
2031 if (IS_DEADDIR(dir))
2032 return -ENOENT;
2033 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
2034 return -EBUSY;
2035 return 0;
2036}
2037
2038/* Check whether we can create an object with dentry child in directory
2039 * dir.
2040 * 1. We can't do it if child already exists (open has special treatment for
2041 * this case, but since we are inlined it's OK)
2042 * 2. We can't do it if dir is read-only (done in permission())
2043 * 3. We should have write and exec permissions on dir
2044 * 4. We can't do it if dir is immutable (done in permission())
2045 */
2046static inline int may_create(struct inode *dir, struct dentry *child)
2047{
2048 if (child->d_inode)
2049 return -EEXIST;
2050 if (IS_DEADDIR(dir))
2051 return -ENOENT;
2052 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
2053}
2054
2055/*
2056 * p1 and p2 should be directories on the same fs.
2057 */
2058struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2059{
2060 struct dentry *p;
2061
2062 if (p1 == p2) {
2063 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2064 return NULL;
2065 }
2066
2067 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2068
2069 p = d_ancestor(p2, p1);
2070 if (p) {
2071 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
2072 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
2073 return p;
2074 }
2075
2076 p = d_ancestor(p1, p2);
2077 if (p) {
2078 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2079 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2080 return p;
2081 }
2082
2083 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2084 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2085 return NULL;
2086}
2087
2088void unlock_rename(struct dentry *p1, struct dentry *p2)
2089{
2090 mutex_unlock(&p1->d_inode->i_mutex);
2091 if (p1 != p2) {
2092 mutex_unlock(&p2->d_inode->i_mutex);
2093 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2094 }
2095}
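
/*
 * A minimal sketch of the lock_rename()/unlock_rename() bracket for code
 * that must hold two (possibly nested, possibly identical) directories on
 * the same filesystem.  The returned "trap" dentry is the one that may be
 * neither source nor target of the operation; the helper is hypothetical.
 */
static int example_with_parents_locked(struct dentry *d1, struct dentry *d2)
{
        struct dentry *trap;
        int error = 0;

        /* locks both parents (and s_vfs_rename_mutex if they differ) */
        trap = lock_rename(d1, d2);
        /*
         * ... look up the children here and fail with -EINVAL if either
         * of them equals 'trap', as sys_renameat() does below ...
         */
        unlock_rename(d1, d2);
        return error;
}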
2096
2097int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2098 struct nameidata *nd)
2099{
2100 int error = may_create(dir, dentry);
2101
2102 if (error)
2103 return error;
2104
2105 if (!dir->i_op->create)
2106 return -EACCES; /* shouldn't it be ENOSYS? */
2107 mode &= S_IALLUGO;
2108 mode |= S_IFREG;
2109 error = security_inode_create(dir, dentry, mode);
2110 if (error)
2111 return error;
2112 error = dir->i_op->create(dir, dentry, mode, nd);
2113 if (!error)
2114 fsnotify_create(dir, dentry);
2115 return error;
2116}
2117
2118static int may_open(struct path *path, int acc_mode, int flag)
2119{
2120 struct dentry *dentry = path->dentry;
2121 struct inode *inode = dentry->d_inode;
2122 int error;
2123
2124 /* O_PATH? */
2125 if (!acc_mode)
2126 return 0;
2127
2128 if (!inode)
2129 return -ENOENT;
2130
2131 switch (inode->i_mode & S_IFMT) {
2132 case S_IFLNK:
2133 return -ELOOP;
2134 case S_IFDIR:
2135 if (acc_mode & MAY_WRITE)
2136 return -EISDIR;
2137 break;
2138 case S_IFBLK:
2139 case S_IFCHR:
2140 if (path->mnt->mnt_flags & MNT_NODEV)
2141 return -EACCES;
2142 /*FALLTHRU*/
2143 case S_IFIFO:
2144 case S_IFSOCK:
2145 flag &= ~O_TRUNC;
2146 break;
2147 }
2148
2149 error = inode_permission(inode, acc_mode);
2150 if (error)
2151 return error;
2152
2153 /*
2154 * An append-only file must be opened in append mode for writing.
2155 */
2156 if (IS_APPEND(inode)) {
2157 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
2158 return -EPERM;
2159 if (flag & O_TRUNC)
2160 return -EPERM;
2161 }
2162
2163 /* O_NOATIME can only be set by the owner or superuser */
2164 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
2165 return -EPERM;
2166
2167 return 0;
2168}
2169
2170static int handle_truncate(struct file *filp)
2171{
2172 struct path *path = &filp->f_path;
2173 struct inode *inode = path->dentry->d_inode;
2174 int error = get_write_access(inode);
2175 if (error)
2176 return error;
2177 /*
2178 * Refuse to truncate files with mandatory locks held on them.
2179 */
2180 error = locks_verify_locked(inode);
2181 if (!error)
2182 error = security_path_truncate(path);
2183 if (!error) {
2184 error = do_truncate(path->dentry, 0,
2185 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
2186 filp);
2187 }
2188 put_write_access(inode);
2189 return error;
2190}
2191
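/*
 * Note on the accmode quirk below: Linux accepts an open() access mode of 3
 * (both the O_WRONLY and O_RDWR bits set) as a special "check read and write
 * permission, but the descriptor is usable for neither" mode.  For the intent
 * flags handed to filesystems the value is mapped down to 2 (O_RDWR),
 * presumably so the low bits remain a conventional access mode.
 */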
2192static inline int open_to_namei_flags(int flag)
2193{
2194 if ((flag & O_ACCMODE) == 3)
2195 flag--;
2196 return flag;
2197}
2198
2199/*
2200 * Lookup, maybe create and open the last component
2201 *
2202 * Must be called with i_mutex held on parent.
2203 *
2204 * Returns the open file or NULL on success; returns an ERR_PTR on error.
2205 * NULL means that no open was performed, only a lookup.
2206 */
2207static struct file *lookup_open(struct nameidata *nd, struct path *path,
2208 const struct open_flags *op,
2209 int *want_write, bool *created)
2210{
2211 struct dentry *dir = nd->path.dentry;
2212 struct dentry *dentry;
2213 int error;
2214
2215 *created = false;
2216 dentry = lookup_hash(nd);
2217 if (IS_ERR(dentry))
2218 return ERR_CAST(dentry);
2219
2220 /* Negative dentry, just create the file */
2221 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
2222 umode_t mode = op->mode;
2223 if (!IS_POSIXACL(dir->d_inode))
2224 mode &= ~current_umask();
2225 /*
2226 * This write is needed to ensure that a
2227 * rw->ro transition does not occur between
2228 * the time when the file is created and when
2229 * a permanent write count is taken through
2230 * the 'struct file' in nameidata_to_filp().
2231 */
2232 error = mnt_want_write(nd->path.mnt);
2233 if (error)
2234 goto out_dput;
2235 *want_write = 1;
2236 *created = true;
2237 error = security_path_mknod(&nd->path, dentry, mode, 0);
2238 if (error)
2239 goto out_dput;
2240 error = vfs_create(dir->d_inode, dentry, mode, nd);
2241 if (error)
2242 goto out_dput;
2243 }
2244 path->dentry = dentry;
2245 path->mnt = nd->path.mnt;
2246 return NULL;
2247
2248out_dput:
2249 dput(dentry);
2250 return ERR_PTR(error);
2251}
2252
2253/*
2254 * Handle the last step of open()
2255 */
2256static struct file *do_last(struct nameidata *nd, struct path *path,
2257 const struct open_flags *op, const char *pathname)
2258{
2259 struct dentry *dir = nd->path.dentry;
2260 int open_flag = op->open_flag;
2261 int will_truncate = open_flag & O_TRUNC;
2262 int want_write = 0;
2263 int acc_mode = op->acc_mode;
2264 struct file *filp;
2265 struct inode *inode;
2266 bool created;
2267 int symlink_ok = 0;
2268 struct path save_parent = { .dentry = NULL, .mnt = NULL };
2269 bool retried = false;
2270 int error;
2271
2272 nd->flags &= ~LOOKUP_PARENT;
2273 nd->flags |= op->intent;
2274
2275 switch (nd->last_type) {
2276 case LAST_DOTDOT:
2277 case LAST_DOT:
2278 error = handle_dots(nd, nd->last_type);
2279 if (error)
2280 return ERR_PTR(error);
2281 /* fallthrough */
2282 case LAST_ROOT:
2283 error = complete_walk(nd);
2284 if (error)
2285 return ERR_PTR(error);
2286 audit_inode(pathname, nd->path.dentry);
2287 if (open_flag & O_CREAT) {
2288 error = -EISDIR;
2289 goto exit;
2290 }
2291 goto ok;
2292 case LAST_BIND:
2293 error = complete_walk(nd);
2294 if (error)
2295 return ERR_PTR(error);
2296 audit_inode(pathname, dir);
2297 goto ok;
2298 }
2299
2300 if (!(open_flag & O_CREAT)) {
2301 if (nd->last.name[nd->last.len])
2302 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2303 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
2304 symlink_ok = 1;
2305 /* we _can_ be in RCU mode here */
2306 error = lookup_fast(nd, &nd->last, path, &inode);
2307 if (likely(!error))
2308 goto finish_lookup;
2309
2310 if (error < 0)
2311 goto exit;
2312
2313 BUG_ON(nd->inode != dir->d_inode);
2314 } else {
2315 /* create side of things */
2316 /*
2317 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
2318 * has been cleared when we got to the last component we are
2319 * about to look up
2320 */
2321 error = complete_walk(nd);
2322 if (error)
2323 return ERR_PTR(error);
2324
2325 audit_inode(pathname, dir);
2326 error = -EISDIR;
2327 /* trailing slashes? */
2328 if (nd->last.name[nd->last.len])
2329 goto exit;
2330 }
2331
2332retry_lookup:
2333 mutex_lock(&dir->d_inode->i_mutex);
2334 filp = lookup_open(nd, path, op, &want_write, &created);
2335 mutex_unlock(&dir->d_inode->i_mutex);
2336
2337 if (IS_ERR(filp))
2338 goto out;
2339
2340 if (created) {
2341 /* Don't check for write permission, don't truncate */
2342 open_flag &= ~O_TRUNC;
2343 will_truncate = 0;
2344 acc_mode = MAY_OPEN;
2345 path_to_nameidata(path, nd);
2346 goto common;
2347 }
2348
2349 /*
2350 * It already exists.
2351 */
2352 audit_inode(pathname, path->dentry);
2353
2354 error = -EEXIST;
2355 if (open_flag & O_EXCL)
2356 goto exit_dput;
2357
2358 error = follow_managed(path, nd->flags);
2359 if (error < 0)
2360 goto exit_dput;
2361
2362 if (error)
2363 nd->flags |= LOOKUP_JUMPED;
2364
2365 BUG_ON(nd->flags & LOOKUP_RCU);
2366 inode = path->dentry->d_inode;
2367finish_lookup:
2368 /* we _can_ be in RCU mode here */
2369 error = -ENOENT;
2370 if (!inode) {
2371 path_to_nameidata(path, nd);
2372 goto exit;
2373 }
2374
2375 if (should_follow_link(inode, !symlink_ok)) {
2376 if (nd->flags & LOOKUP_RCU) {
2377 if (unlikely(unlazy_walk(nd, path->dentry))) {
2378 error = -ECHILD;
2379 goto exit;
2380 }
2381 }
2382 BUG_ON(inode != path->dentry->d_inode);
2383 return NULL;
2384 }
2385
2386 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
2387 path_to_nameidata(path, nd);
2388 } else {
2389 save_parent.dentry = nd->path.dentry;
2390 save_parent.mnt = mntget(path->mnt);
2391 nd->path.dentry = path->dentry;
2392
2393 }
2394 nd->inode = inode;
2395 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
2396 error = complete_walk(nd);
2397 if (error) {
2398 path_put(&save_parent);
2399 return ERR_PTR(error);
2400 }
2401 error = -EISDIR;
2402 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
2403 goto exit;
2404 error = -ENOTDIR;
2405 if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
2406 goto exit;
2407 audit_inode(pathname, nd->path.dentry);
2408ok:
2409 if (!S_ISREG(nd->inode->i_mode))
2410 will_truncate = 0;
2411
2412 if (will_truncate) {
2413 error = mnt_want_write(nd->path.mnt);
2414 if (error)
2415 goto exit;
2416 want_write = 1;
2417 }
2418common:
2419 error = may_open(&nd->path, acc_mode, open_flag);
2420 if (error)
2421 goto exit;
2422 filp = nameidata_to_filp(nd);
2423 if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
2424 BUG_ON(save_parent.dentry != dir);
2425 path_put(&nd->path);
2426 nd->path = save_parent;
2427 nd->inode = dir->d_inode;
2428 save_parent.mnt = NULL;
2429 save_parent.dentry = NULL;
2430 if (want_write) {
2431 mnt_drop_write(nd->path.mnt);
2432 want_write = 0;
2433 }
2434 retried = true;
2435 goto retry_lookup;
2436 }
2437 if (!IS_ERR(filp)) {
2438 error = ima_file_check(filp, op->acc_mode);
2439 if (error) {
2440 fput(filp);
2441 filp = ERR_PTR(error);
2442 }
2443 }
2444 if (!IS_ERR(filp)) {
2445 if (will_truncate) {
2446 error = handle_truncate(filp);
2447 if (error) {
2448 fput(filp);
2449 filp = ERR_PTR(error);
2450 }
2451 }
2452 }
2453out:
2454 if (want_write)
2455 mnt_drop_write(nd->path.mnt);
2456 path_put(&save_parent);
2457 terminate_walk(nd);
2458 return filp;
2459
2460exit_dput:
2461 path_put_conditional(path, nd);
2462exit:
2463 filp = ERR_PTR(error);
2464 goto out;
2465}
2466
2467static struct file *path_openat(int dfd, const char *pathname,
2468 struct nameidata *nd, const struct open_flags *op, int flags)
2469{
2470 struct file *base = NULL;
2471 struct file *filp;
2472 struct path path;
2473 int error;
2474
2475 filp = get_empty_filp();
2476 if (!filp)
2477 return ERR_PTR(-ENFILE);
2478
2479 filp->f_flags = op->open_flag;
2480 nd->intent.open.file = filp;
2481 nd->intent.open.flags = open_to_namei_flags(op->open_flag);
2482 nd->intent.open.create_mode = op->mode;
2483
2484 error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
2485 if (unlikely(error))
2486 goto out_filp;
2487
2488 current->total_link_count = 0;
2489 error = link_path_walk(pathname, nd);
2490 if (unlikely(error))
2491 goto out_filp;
2492
2493 filp = do_last(nd, &path, op, pathname);
2494 while (unlikely(!filp)) { /* trailing symlink */
2495 struct path link = path;
2496 void *cookie;
2497 if (!(nd->flags & LOOKUP_FOLLOW)) {
2498 path_put_conditional(&path, nd);
2499 path_put(&nd->path);
2500 filp = ERR_PTR(-ELOOP);
2501 break;
2502 }
2503 nd->flags |= LOOKUP_PARENT;
2504 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
2505 error = follow_link(&link, nd, &cookie);
2506 if (unlikely(error))
2507 goto out_filp;
2508 filp = do_last(nd, &path, op, pathname);
2509 put_link(nd, &link, cookie);
2510 }
2511out:
2512 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
2513 path_put(&nd->root);
2514 if (base)
2515 fput(base);
2516 release_open_intent(nd);
2517 if (filp == ERR_PTR(-EOPENSTALE)) {
2518 if (flags & LOOKUP_RCU)
2519 filp = ERR_PTR(-ECHILD);
2520 else
2521 filp = ERR_PTR(-ESTALE);
2522 }
2523 return filp;
2524
2525out_filp:
2526 filp = ERR_PTR(error);
2527 goto out;
2528}
2529
2530struct file *do_filp_open(int dfd, const char *pathname,
2531 const struct open_flags *op, int flags)
2532{
2533 struct nameidata nd;
2534 struct file *filp;
2535
2536 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
2537 if (unlikely(filp == ERR_PTR(-ECHILD)))
2538 filp = path_openat(dfd, pathname, &nd, op, flags);
2539 if (unlikely(filp == ERR_PTR(-ESTALE)))
2540 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
2541 return filp;
2542}
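
/*
 * A minimal sketch of how kernel code normally reaches do_filp_open(): via
 * filp_open()/filp_close() from fs/open.c, which build the struct open_flags
 * and pick the initial lookup flags.  The path name and helper below are
 * hypothetical; the RCU-walk -> ref-walk -> LOOKUP_REVAL retries above stay
 * internal to the VFS.
 */
static int example_open_and_close(void)
{
        struct file *filp;

        filp = filp_open("/etc/hypothetical.conf", O_RDONLY, 0);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        /* ... read from the file with kernel_read() or vfs_read() ... */

        filp_close(filp, NULL);
        return 0;
}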
2543
2544struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
2545 const char *name, const struct open_flags *op, int flags)
2546{
2547 struct nameidata nd;
2548 struct file *file;
2549
2550 nd.root.mnt = mnt;
2551 nd.root.dentry = dentry;
2552
2553 flags |= LOOKUP_ROOT;
2554
2555 if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
2556 return ERR_PTR(-ELOOP);
2557
2558 file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
2559 if (unlikely(file == ERR_PTR(-ECHILD)))
2560 file = path_openat(-1, name, &nd, op, flags);
2561 if (unlikely(file == ERR_PTR(-ESTALE)))
2562 file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
2563 return file;
2564}
2565
2566struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir)
2567{
2568 struct dentry *dentry = ERR_PTR(-EEXIST);
2569 struct nameidata nd;
2570 int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
2571 if (error)
2572 return ERR_PTR(error);
2573
2574 /*
2575 * Yucky last component or no last component at all?
2576 * (foo/., foo/.., /////)
2577 */
2578 if (nd.last_type != LAST_NORM)
2579 goto out;
2580 nd.flags &= ~LOOKUP_PARENT;
2581 nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
2582 nd.intent.open.flags = O_EXCL;
2583
2584 /*
2585 * Do the final lookup.
2586 */
2587 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2588 dentry = lookup_hash(&nd);
2589 if (IS_ERR(dentry))
2590 goto fail;
2591
2592 if (dentry->d_inode)
2593 goto eexist;
2594 /*
2595 * Special case - lookup gave negative, but... we had foo/bar/
2596 * From the vfs_mknod() POV we just have a negative dentry -
2597 * all is fine. Let's be bastards - you had / on the end, you've
2598 * been asking for a (non-existent) directory. -ENOENT for you.
2599 */
2600 if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
2601 dput(dentry);
2602 dentry = ERR_PTR(-ENOENT);
2603 goto fail;
2604 }
2605 *path = nd.path;
2606 return dentry;
2607eexist:
2608 dput(dentry);
2609 dentry = ERR_PTR(-EEXIST);
2610fail:
2611 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2612out:
2613 path_put(&nd.path);
2614 return dentry;
2615}
2616EXPORT_SYMBOL(kern_path_create);
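
/*
 * A minimal sketch of in-kernel object creation with kern_path_create().
 * On success the parent's i_mutex is held and *path holds references, so
 * the unlock/dput/path_put sequence mirrors sys_mknodat() below.  The
 * helper is hypothetical and omits the security_path_mknod() hook for
 * brevity.
 */
static int example_make_fifo(const char *pathname, umode_t mode)
{
        struct path path;
        struct dentry *dentry;
        int error;

        dentry = kern_path_create(AT_FDCWD, pathname, &path, 0);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        error = mnt_want_write(path.mnt);
        if (!error) {
                error = vfs_mknod(path.dentry->d_inode, dentry,
                                  S_IFIFO | (mode & S_IALLUGO), 0);
                mnt_drop_write(path.mnt);
        }
        dput(dentry);
        mutex_unlock(&path.dentry->d_inode->i_mutex);
        path_put(&path);
        return error;
}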
2617
2618struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
2619{
2620 char *tmp = getname(pathname);
2621 struct dentry *res;
2622 if (IS_ERR(tmp))
2623 return ERR_CAST(tmp);
2624 res = kern_path_create(dfd, tmp, path, is_dir);
2625 putname(tmp);
2626 return res;
2627}
2628EXPORT_SYMBOL(user_path_create);
2629
2630int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2631{
2632 int error = may_create(dir, dentry);
2633
2634 if (error)
2635 return error;
2636
2637 if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
2638 return -EPERM;
2639
2640 if (!dir->i_op->mknod)
2641 return -EPERM;
2642
2643 error = devcgroup_inode_mknod(mode, dev);
2644 if (error)
2645 return error;
2646
2647 error = security_inode_mknod(dir, dentry, mode, dev);
2648 if (error)
2649 return error;
2650
2651 error = dir->i_op->mknod(dir, dentry, mode, dev);
2652 if (!error)
2653 fsnotify_create(dir, dentry);
2654 return error;
2655}
2656
2657static int may_mknod(umode_t mode)
2658{
2659 switch (mode & S_IFMT) {
2660 case S_IFREG:
2661 case S_IFCHR:
2662 case S_IFBLK:
2663 case S_IFIFO:
2664 case S_IFSOCK:
2665 case 0: /* zero mode translates to S_IFREG */
2666 return 0;
2667 case S_IFDIR:
2668 return -EPERM;
2669 default:
2670 return -EINVAL;
2671 }
2672}
2673
2674SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
2675 unsigned, dev)
2676{
2677 struct dentry *dentry;
2678 struct path path;
2679 int error;
2680
2681 if (S_ISDIR(mode))
2682 return -EPERM;
2683
2684 dentry = user_path_create(dfd, filename, &path, 0);
2685 if (IS_ERR(dentry))
2686 return PTR_ERR(dentry);
2687
2688 if (!IS_POSIXACL(path.dentry->d_inode))
2689 mode &= ~current_umask();
2690 error = may_mknod(mode);
2691 if (error)
2692 goto out_dput;
2693 error = mnt_want_write(path.mnt);
2694 if (error)
2695 goto out_dput;
2696 error = security_path_mknod(&path, dentry, mode, dev);
2697 if (error)
2698 goto out_drop_write;
2699 switch (mode & S_IFMT) {
2700 case 0: case S_IFREG:
2701 error = vfs_create(path.dentry->d_inode,dentry,mode,NULL);
2702 break;
2703 case S_IFCHR: case S_IFBLK:
2704 error = vfs_mknod(path.dentry->d_inode,dentry,mode,
2705 new_decode_dev(dev));
2706 break;
2707 case S_IFIFO: case S_IFSOCK:
2708 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
2709 break;
2710 }
2711out_drop_write:
2712 mnt_drop_write(path.mnt);
2713out_dput:
2714 dput(dentry);
2715 mutex_unlock(&path.dentry->d_inode->i_mutex);
2716 path_put(&path);
2717
2718 return error;
2719}
2720
2721SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
2722{
2723 return sys_mknodat(AT_FDCWD, filename, mode, dev);
2724}
2725
2726int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2727{
2728 int error = may_create(dir, dentry);
2729 unsigned max_links = dir->i_sb->s_max_links;
2730
2731 if (error)
2732 return error;
2733
2734 if (!dir->i_op->mkdir)
2735 return -EPERM;
2736
2737 mode &= (S_IRWXUGO|S_ISVTX);
2738 error = security_inode_mkdir(dir, dentry, mode);
2739 if (error)
2740 return error;
2741
2742 if (max_links && dir->i_nlink >= max_links)
2743 return -EMLINK;
2744
2745 error = dir->i_op->mkdir(dir, dentry, mode);
2746 if (!error)
2747 fsnotify_mkdir(dir, dentry);
2748 return error;
2749}
2750
2751SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
2752{
2753 struct dentry *dentry;
2754 struct path path;
2755 int error;
2756
2757 dentry = user_path_create(dfd, pathname, &path, 1);
2758 if (IS_ERR(dentry))
2759 return PTR_ERR(dentry);
2760
2761 if (!IS_POSIXACL(path.dentry->d_inode))
2762 mode &= ~current_umask();
2763 error = mnt_want_write(path.mnt);
2764 if (error)
2765 goto out_dput;
2766 error = security_path_mkdir(&path, dentry, mode);
2767 if (error)
2768 goto out_drop_write;
2769 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
2770out_drop_write:
2771 mnt_drop_write(path.mnt);
2772out_dput:
2773 dput(dentry);
2774 mutex_unlock(&path.dentry->d_inode->i_mutex);
2775 path_put(&path);
2776 return error;
2777}
2778
2779SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
2780{
2781 return sys_mkdirat(AT_FDCWD, pathname, mode);
2782}
2783
2784/*
2785 * The dentry_unhash() helper will try to drop the dentry early: we
2786 * should have a usage count of 1 if we're the only user of this
2787 * dentry, and if that is true (possibly after pruning the dcache),
2788 * then we drop the dentry now.
2789 *
2790 * A low-level filesystem can, if it chooses, legally
2791 * do a
2792 *
2793 * if (!d_unhashed(dentry))
2794 * return -EBUSY;
2795 *
2796 * if it cannot handle the case of removing a directory
2797 * that is still in use by something else..
2798 */
2799void dentry_unhash(struct dentry *dentry)
2800{
2801 shrink_dcache_parent(dentry);
2802 spin_lock(&dentry->d_lock);
2803 if (dentry->d_count == 1)
2804 __d_drop(dentry);
2805 spin_unlock(&dentry->d_lock);
2806}
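
/*
 * A minimal sketch of the pattern described in the comment above: a
 * filesystem that cannot remove a busy directory calls dentry_unhash()
 * first and refuses the operation if the dentry is still hashed.  The
 * ->rmdir() method below is hypothetical.
 */
static int example_fs_rmdir(struct inode *dir, struct dentry *dentry)
{
        dentry_unhash(dentry);
        if (!d_unhashed(dentry))
                return -EBUSY;  /* somebody else still holds a reference */

        /* ... remove the on-disk directory and drop the inode here ... */
        return 0;
}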
2807
2808int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2809{
2810 int error = may_delete(dir, dentry, 1);
2811
2812 if (error)
2813 return error;
2814
2815 if (!dir->i_op->rmdir)
2816 return -EPERM;
2817
2818 dget(dentry);
2819 mutex_lock(&dentry->d_inode->i_mutex);
2820
2821 error = -EBUSY;
2822 if (d_mountpoint(dentry))
2823 goto out;
2824
2825 error = security_inode_rmdir(dir, dentry);
2826 if (error)
2827 goto out;
2828
2829 shrink_dcache_parent(dentry);
2830 error = dir->i_op->rmdir(dir, dentry);
2831 if (error)
2832 goto out;
2833
2834 dentry->d_inode->i_flags |= S_DEAD;
2835 dont_mount(dentry);
2836
2837out:
2838 mutex_unlock(&dentry->d_inode->i_mutex);
2839 dput(dentry);
2840 if (!error)
2841 d_delete(dentry);
2842 return error;
2843}
2844
2845static long do_rmdir(int dfd, const char __user *pathname)
2846{
2847 int error = 0;
2848 char * name;
2849 struct dentry *dentry;
2850 struct nameidata nd;
2851
2852 error = user_path_parent(dfd, pathname, &nd, &name);
2853 if (error)
2854 return error;
2855
2856 switch(nd.last_type) {
2857 case LAST_DOTDOT:
2858 error = -ENOTEMPTY;
2859 goto exit1;
2860 case LAST_DOT:
2861 error = -EINVAL;
2862 goto exit1;
2863 case LAST_ROOT:
2864 error = -EBUSY;
2865 goto exit1;
2866 }
2867
2868 nd.flags &= ~LOOKUP_PARENT;
2869
2870 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2871 dentry = lookup_hash(&nd);
2872 error = PTR_ERR(dentry);
2873 if (IS_ERR(dentry))
2874 goto exit2;
2875 if (!dentry->d_inode) {
2876 error = -ENOENT;
2877 goto exit3;
2878 }
2879 error = mnt_want_write(nd.path.mnt);
2880 if (error)
2881 goto exit3;
2882 error = security_path_rmdir(&nd.path, dentry);
2883 if (error)
2884 goto exit4;
2885 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
2886exit4:
2887 mnt_drop_write(nd.path.mnt);
2888exit3:
2889 dput(dentry);
2890exit2:
2891 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2892exit1:
2893 path_put(&nd.path);
2894 putname(name);
2895 return error;
2896}
2897
2898SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
2899{
2900 return do_rmdir(AT_FDCWD, pathname);
2901}
2902
2903int vfs_unlink(struct inode *dir, struct dentry *dentry)
2904{
2905 int error = may_delete(dir, dentry, 0);
2906
2907 if (error)
2908 return error;
2909
2910 if (!dir->i_op->unlink)
2911 return -EPERM;
2912
2913 mutex_lock(&dentry->d_inode->i_mutex);
2914 if (d_mountpoint(dentry))
2915 error = -EBUSY;
2916 else {
2917 error = security_inode_unlink(dir, dentry);
2918 if (!error) {
2919 error = dir->i_op->unlink(dir, dentry);
2920 if (!error)
2921 dont_mount(dentry);
2922 }
2923 }
2924 mutex_unlock(&dentry->d_inode->i_mutex);
2925
2926 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
2927 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
2928 fsnotify_link_count(dentry->d_inode);
2929 d_delete(dentry);
2930 }
2931
2932 return error;
2933}
2934
2935/*
2936 * Make sure that the actual truncation of the file will occur outside its
2937 * directory's i_mutex. Truncate can take a long time if there is a lot of
2938 * writeout happening, and we don't want to prevent access to the directory
2939 * while waiting on the I/O.
2940 */
2941static long do_unlinkat(int dfd, const char __user *pathname)
2942{
2943 int error;
2944 char *name;
2945 struct dentry *dentry;
2946 struct nameidata nd;
2947 struct inode *inode = NULL;
2948
2949 error = user_path_parent(dfd, pathname, &nd, &name);
2950 if (error)
2951 return error;
2952
2953 error = -EISDIR;
2954 if (nd.last_type != LAST_NORM)
2955 goto exit1;
2956
2957 nd.flags &= ~LOOKUP_PARENT;
2958
2959 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2960 dentry = lookup_hash(&nd);
2961 error = PTR_ERR(dentry);
2962 if (!IS_ERR(dentry)) {
2963 /* Why not before? Because we want correct error value */
2964 if (nd.last.name[nd.last.len])
2965 goto slashes;
2966 inode = dentry->d_inode;
2967 if (!inode)
2968 goto slashes;
2969 ihold(inode);
2970 error = mnt_want_write(nd.path.mnt);
2971 if (error)
2972 goto exit2;
2973 error = security_path_unlink(&nd.path, dentry);
2974 if (error)
2975 goto exit3;
2976 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
2977exit3:
2978 mnt_drop_write(nd.path.mnt);
2979 exit2:
2980 dput(dentry);
2981 }
2982 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
2983 if (inode)
2984 iput(inode); /* truncate the inode here */
2985exit1:
2986 path_put(&nd.path);
2987 putname(name);
2988 return error;
2989
2990slashes:
2991 error = !dentry->d_inode ? -ENOENT :
2992 S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
2993 goto exit2;
2994}
2995
2996SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
2997{
2998 if ((flag & ~AT_REMOVEDIR) != 0)
2999 return -EINVAL;
3000
3001 if (flag & AT_REMOVEDIR)
3002 return do_rmdir(dfd, pathname);
3003
3004 return do_unlinkat(dfd, pathname);
3005}
3006
3007SYSCALL_DEFINE1(unlink, const char __user *, pathname)
3008{
3009 return do_unlinkat(AT_FDCWD, pathname);
3010}
3011
3012int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
3013{
3014 int error = may_create(dir, dentry);
3015
3016 if (error)
3017 return error;
3018
3019 if (!dir->i_op->symlink)
3020 return -EPERM;
3021
3022 error = security_inode_symlink(dir, dentry, oldname);
3023 if (error)
3024 return error;
3025
3026 error = dir->i_op->symlink(dir, dentry, oldname);
3027 if (!error)
3028 fsnotify_create(dir, dentry);
3029 return error;
3030}
3031
3032SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
3033 int, newdfd, const char __user *, newname)
3034{
3035 int error;
3036 char *from;
3037 struct dentry *dentry;
3038 struct path path;
3039
3040 from = getname(oldname);
3041 if (IS_ERR(from))
3042 return PTR_ERR(from);
3043
3044 dentry = user_path_create(newdfd, newname, &path, 0);
3045 error = PTR_ERR(dentry);
3046 if (IS_ERR(dentry))
3047 goto out_putname;
3048
3049 error = mnt_want_write(path.mnt);
3050 if (error)
3051 goto out_dput;
3052 error = security_path_symlink(&path, dentry, from);
3053 if (error)
3054 goto out_drop_write;
3055 error = vfs_symlink(path.dentry->d_inode, dentry, from);
3056out_drop_write:
3057 mnt_drop_write(path.mnt);
3058out_dput:
3059 dput(dentry);
3060 mutex_unlock(&path.dentry->d_inode->i_mutex);
3061 path_put(&path);
3062out_putname:
3063 putname(from);
3064 return error;
3065}
3066
3067SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
3068{
3069 return sys_symlinkat(oldname, AT_FDCWD, newname);
3070}
3071
3072int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
3073{
3074 struct inode *inode = old_dentry->d_inode;
3075 unsigned max_links = dir->i_sb->s_max_links;
3076 int error;
3077
3078 if (!inode)
3079 return -ENOENT;
3080
3081 error = may_create(dir, new_dentry);
3082 if (error)
3083 return error;
3084
3085 if (dir->i_sb != inode->i_sb)
3086 return -EXDEV;
3087
3088 /*
3089 * A link to an append-only or immutable file cannot be created.
3090 */
3091 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3092 return -EPERM;
3093 if (!dir->i_op->link)
3094 return -EPERM;
3095 if (S_ISDIR(inode->i_mode))
3096 return -EPERM;
3097
3098 error = security_inode_link(old_dentry, dir, new_dentry);
3099 if (error)
3100 return error;
3101
3102 mutex_lock(&inode->i_mutex);
3103 /* Make sure we don't allow creating a hardlink to an unlinked file */
3104 if (inode->i_nlink == 0)
3105 error = -ENOENT;
3106 else if (max_links && inode->i_nlink >= max_links)
3107 error = -EMLINK;
3108 else
3109 error = dir->i_op->link(old_dentry, dir, new_dentry);
3110 mutex_unlock(&inode->i_mutex);
3111 if (!error)
3112 fsnotify_link(dir, inode, new_dentry);
3113 return error;
3114}
3115
3116/*
3117 * Hardlinks are often used in delicate situations. We avoid
3118 * security-related surprises by not following symlinks on the
3119 * newname. --KAB
3120 *
3121 * We don't follow them on the oldname either to be compatible
3122 * with linux 2.0, and to avoid hard-linking to directories
3123 * and other special files. --ADM
3124 */
3125SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
3126 int, newdfd, const char __user *, newname, int, flags)
3127{
3128 struct dentry *new_dentry;
3129 struct path old_path, new_path;
3130 int how = 0;
3131 int error;
3132
3133 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
3134 return -EINVAL;
3135 /*
3136 * To use null names (AT_EMPTY_PATH) we require CAP_DAC_READ_SEARCH.
3137 * This ensures that not everyone will be able to create a
3138 * hard link using the passed file descriptor.
3139 */
3140 if (flags & AT_EMPTY_PATH) {
3141 if (!capable(CAP_DAC_READ_SEARCH))
3142 return -ENOENT;
3143 how = LOOKUP_EMPTY;
3144 }
3145
3146 if (flags & AT_SYMLINK_FOLLOW)
3147 how |= LOOKUP_FOLLOW;
3148
3149 error = user_path_at(olddfd, oldname, how, &old_path);
3150 if (error)
3151 return error;
3152
3153 new_dentry = user_path_create(newdfd, newname, &new_path, 0);
3154 error = PTR_ERR(new_dentry);
3155 if (IS_ERR(new_dentry))
3156 goto out;
3157
3158 error = -EXDEV;
3159 if (old_path.mnt != new_path.mnt)
3160 goto out_dput;
3161 error = mnt_want_write(new_path.mnt);
3162 if (error)
3163 goto out_dput;
3164 error = security_path_link(old_path.dentry, &new_path, new_dentry);
3165 if (error)
3166 goto out_drop_write;
3167 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
3168out_drop_write:
3169 mnt_drop_write(new_path.mnt);
3170out_dput:
3171 dput(new_dentry);
3172 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
3173 path_put(&new_path);
3174out:
3175 path_put(&old_path);
3176
3177 return error;
3178}
3179
3180SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
3181{
3182 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
3183}
3184
3185/*
3186 * The worst of all namespace operations - renaming directory. "Perverted"
3187 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
3188 * Problems:
3189 * a) we can get into loop creation. Check is done in is_subdir().
3190 * b) race potential - two innocent renames can create a loop together.
3191 * That's where 4.4 screws up. Current fix: serialization on
3192 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
3193 * story.
3194 * c) we have to lock _three_ objects - parents and victim (if it exists).
3195 * And that - after we got ->i_mutex on parents (until then we don't know
3196 * whether the target exists). Solution: try to be smart with locking
3197 * order for inodes. We rely on the fact that tree topology may change
3198 * only under ->s_vfs_rename_mutex _and_ that parent of the object we
3199 * move will be locked. Thus we can rank directories by the tree
3200 * (ancestors first) and rank all non-directories after them.
3201 * That works since everybody except rename does "lock parent, lookup,
3202 * lock child" and rename is under ->s_vfs_rename_mutex.
3203 * HOWEVER, it relies on the assumption that any object with ->lookup()
3204 * has no more than 1 dentry. If "hybrid" objects will ever appear,
3205 * we'd better make sure that there's no link(2) for them.
3206 * d) conversion from fhandle to dentry may come in the wrong moment - when
3207 * we are removing the target. Solution: we will have to grab ->i_mutex
3208 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
3209 * ->i_mutex on parents, which works but leads to some truly excessive
3210 * locking].
3211 */
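
/*
 * Worked example (illustrative): rename("a/x", "a/b/y").  sys_renameat()
 * calls lock_rename(new_dir = "a/b", old_dir = "a"): it takes
 * s_vfs_rename_mutex, sees that "a" is an ancestor of "a/b", locks "a"
 * with I_MUTEX_PARENT and "a/b" with I_MUTEX_CHILD, and returns "a/b" as
 * the trap dentry - the one that may be neither source nor target.  The
 * victim "y", if it exists, is locked later, under both parents, by
 * vfs_rename_dir()/vfs_rename_other() below.
 */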
3212static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3213 struct inode *new_dir, struct dentry *new_dentry)
3214{
3215 int error = 0;
3216 struct inode *target = new_dentry->d_inode;
3217 unsigned max_links = new_dir->i_sb->s_max_links;
3218
3219 /*
3220 * If we are going to change the parent - check write permissions,
3221 * we'll need to flip '..'.
3222 */
3223 if (new_dir != old_dir) {
3224 error = inode_permission(old_dentry->d_inode, MAY_WRITE);
3225 if (error)
3226 return error;
3227 }
3228
3229 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
3230 if (error)
3231 return error;
3232
3233 dget(new_dentry);
3234 if (target)
3235 mutex_lock(&target->i_mutex);
3236
3237 error = -EBUSY;
3238 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
3239 goto out;
3240
3241 error = -EMLINK;
3242 if (max_links && !target && new_dir != old_dir &&
3243 new_dir->i_nlink >= max_links)
3244 goto out;
3245
3246 if (target)
3247 shrink_dcache_parent(new_dentry);
3248 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
3249 if (error)
3250 goto out;
3251
3252 if (target) {
3253 target->i_flags |= S_DEAD;
3254 dont_mount(new_dentry);
3255 }
3256out:
3257 if (target)
3258 mutex_unlock(&target->i_mutex);
3259 dput(new_dentry);
3260 if (!error)
3261 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3262 d_move(old_dentry,new_dentry);
3263 return error;
3264}
3265
3266static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
3267 struct inode *new_dir, struct dentry *new_dentry)
3268{
3269 struct inode *target = new_dentry->d_inode;
3270 int error;
3271
3272 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
3273 if (error)
3274 return error;
3275
3276 dget(new_dentry);
3277 if (target)
3278 mutex_lock(&target->i_mutex);
3279
3280 error = -EBUSY;
3281 if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
3282 goto out;
3283
3284 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
3285 if (error)
3286 goto out;
3287
3288 if (target)
3289 dont_mount(new_dentry);
3290 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3291 d_move(old_dentry, new_dentry);
3292out:
3293 if (target)
3294 mutex_unlock(&target->i_mutex);
3295 dput(new_dentry);
3296 return error;
3297}
3298
3299int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
3300 struct inode *new_dir, struct dentry *new_dentry)
3301{
3302 int error;
3303 int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
3304 const unsigned char *old_name;
3305
3306 if (old_dentry->d_inode == new_dentry->d_inode)
3307 return 0;
3308
3309 error = may_delete(old_dir, old_dentry, is_dir);
3310 if (error)
3311 return error;
3312
3313 if (!new_dentry->d_inode)
3314 error = may_create(new_dir, new_dentry);
3315 else
3316 error = may_delete(new_dir, new_dentry, is_dir);
3317 if (error)
3318 return error;
3319
3320 if (!old_dir->i_op->rename)
3321 return -EPERM;
3322
3323 old_name = fsnotify_oldname_init(old_dentry->d_name.name);
3324
3325 if (is_dir)
3326 error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
3327 else
3328 error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
3329 if (!error)
3330 fsnotify_move(old_dir, new_dir, old_name, is_dir,
3331 new_dentry->d_inode, old_dentry);
3332 fsnotify_oldname_free(old_name);
3333
3334 return error;
3335}
3336
3337SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
3338 int, newdfd, const char __user *, newname)
3339{
3340 struct dentry *old_dir, *new_dir;
3341 struct dentry *old_dentry, *new_dentry;
3342 struct dentry *trap;
3343 struct nameidata oldnd, newnd;
3344 char *from;
3345 char *to;
3346 int error;
3347
3348 error = user_path_parent(olddfd, oldname, &oldnd, &from);
3349 if (error)
3350 goto exit;
3351
3352 error = user_path_parent(newdfd, newname, &newnd, &to);
3353 if (error)
3354 goto exit1;
3355
3356 error = -EXDEV;
3357 if (oldnd.path.mnt != newnd.path.mnt)
3358 goto exit2;
3359
3360 old_dir = oldnd.path.dentry;
3361 error = -EBUSY;
3362 if (oldnd.last_type != LAST_NORM)
3363 goto exit2;
3364
3365 new_dir = newnd.path.dentry;
3366 if (newnd.last_type != LAST_NORM)
3367 goto exit2;
3368
3369 oldnd.flags &= ~LOOKUP_PARENT;
3370 newnd.flags &= ~LOOKUP_PARENT;
3371 newnd.flags |= LOOKUP_RENAME_TARGET;
3372
3373 trap = lock_rename(new_dir, old_dir);
3374
3375 old_dentry = lookup_hash(&oldnd);
3376 error = PTR_ERR(old_dentry);
3377 if (IS_ERR(old_dentry))
3378 goto exit3;
3379 /* source must exist */
3380 error = -ENOENT;
3381 if (!old_dentry->d_inode)
3382 goto exit4;
3383 /* unless the source is a directory, trailing slashes give -ENOTDIR */
3384 if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
3385 error = -ENOTDIR;
3386 if (oldnd.last.name[oldnd.last.len])
3387 goto exit4;
3388 if (newnd.last.name[newnd.last.len])
3389 goto exit4;
3390 }
3391 /* source should not be ancestor of target */
3392 error = -EINVAL;
3393 if (old_dentry == trap)
3394 goto exit4;
3395 new_dentry = lookup_hash(&newnd);
3396 error = PTR_ERR(new_dentry);
3397 if (IS_ERR(new_dentry))
3398 goto exit4;
3399 /* target should not be an ancestor of source */
3400 error = -ENOTEMPTY;
3401 if (new_dentry == trap)
3402 goto exit5;
3403
3404 error = mnt_want_write(oldnd.path.mnt);
3405 if (error)
3406 goto exit5;
3407 error = security_path_rename(&oldnd.path, old_dentry,
3408 &newnd.path, new_dentry);
3409 if (error)
3410 goto exit6;
3411 error = vfs_rename(old_dir->d_inode, old_dentry,
3412 new_dir->d_inode, new_dentry);
3413exit6:
3414 mnt_drop_write(oldnd.path.mnt);
3415exit5:
3416 dput(new_dentry);
3417exit4:
3418 dput(old_dentry);
3419exit3:
3420 unlock_rename(new_dir, old_dir);
3421exit2:
3422 path_put(&newnd.path);
3423 putname(to);
3424exit1:
3425 path_put(&oldnd.path);
3426 putname(from);
3427exit:
3428 return error;
3429}
3430
3431SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
3432{
3433 return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
3434}
3435
3436int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
3437{
3438 int len;
3439
3440 len = PTR_ERR(link);
3441 if (IS_ERR(link))
3442 goto out;
3443
3444 len = strlen(link);
3445 if (len > (unsigned) buflen)
3446 len = buflen;
3447 if (copy_to_user(buffer, link, len))
3448 len = -EFAULT;
3449out:
3450 return len;
3451}
3452
3453/*
3454 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
3455 * have ->follow_link() touching nd only in nd_set_link(). Using (or not
3456 * using) it for any given inode is up to the filesystem.
3457 */
3458int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
3459{
3460 struct nameidata nd;
3461 void *cookie;
3462 int res;
3463
3464 nd.depth = 0;
3465 cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
3466 if (IS_ERR(cookie))
3467 return PTR_ERR(cookie);
3468
3469 res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
3470 if (dentry->d_inode->i_op->put_link)
3471 dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
3472 return res;
3473}
3474
3475int vfs_follow_link(struct nameidata *nd, const char *link)
3476{
3477 return __vfs_follow_link(nd, link);
3478}
3479
3480/* get the link contents into pagecache */
3481static char *page_getlink(struct dentry * dentry, struct page **ppage)
3482{
3483 char *kaddr;
3484 struct page *page;
3485 struct address_space *mapping = dentry->d_inode->i_mapping;
3486 page = read_mapping_page(mapping, 0, NULL);
3487 if (IS_ERR(page))
3488 return (char*)page;
3489 *ppage = page;
3490 kaddr = kmap(page);
3491 nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
3492 return kaddr;
3493}
3494
3495int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
3496{
3497 struct page *page = NULL;
3498 char *s = page_getlink(dentry, &page);
3499 int res = vfs_readlink(dentry,buffer,buflen,s);
3500 if (page) {
3501 kunmap(page);
3502 page_cache_release(page);
3503 }
3504 return res;
3505}
3506
3507void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
3508{
3509 struct page *page = NULL;
3510 nd_set_link(nd, page_getlink(dentry, &page));
3511 return page;
3512}
3513
3514void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
3515{
3516 struct page *page = cookie;
3517
3518 if (page) {
3519 kunmap(page);
3520 page_cache_release(page);
3521 }
3522}
3523
3524/*
3525 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
3526 */
3527int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
3528{
3529 struct address_space *mapping = inode->i_mapping;
3530 struct page *page;
3531 void *fsdata;
3532 int err;
3533 char *kaddr;
3534 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
3535 if (nofs)
3536 flags |= AOP_FLAG_NOFS;
3537
3538retry:
3539 err = pagecache_write_begin(NULL, mapping, 0, len-1,
3540 flags, &page, &fsdata);
3541 if (err)
3542 goto fail;
3543
3544 kaddr = kmap_atomic(page);
3545 memcpy(kaddr, symname, len-1);
3546 kunmap_atomic(kaddr);
3547
3548 err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
3549 page, fsdata);
3550 if (err < 0)
3551 goto fail;
3552 if (err < len-1)
3553 goto retry;
3554
3555 mark_inode_dirty(inode);
3556 return 0;
3557fail:
3558 return err;
3559}
3560
3561int page_symlink(struct inode *inode, const char *symname, int len)
3562{
3563 return __page_symlink(inode, symname, len,
3564 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
3565}
3566
3567const struct inode_operations page_symlink_inode_operations = {
3568 .readlink = generic_readlink,
3569 .follow_link = page_follow_link_light,
3570 .put_link = page_put_link,
3571};
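
/*
 * A minimal sketch of how a filesystem uses the page-cache symlink helpers
 * above: write the target into the page cache with page_symlink() and point
 * i_op at page_symlink_inode_operations.  It assumes the caller has already
 * allocated an inode whose address_space provides ->readpage; the helper
 * name is hypothetical.
 */
static int example_finish_symlink(struct inode *inode, struct dentry *dentry,
                                  const char *symname)
{
        int error;

        inode->i_op = &page_symlink_inode_operations;

        error = page_symlink(inode, symname, strlen(symname) + 1);
        if (!error)
                d_instantiate(dentry, inode);
        else
                iput(inode);
        return error;
}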
3572
3573EXPORT_SYMBOL(user_path_at);
3574EXPORT_SYMBOL(follow_down_one);
3575EXPORT_SYMBOL(follow_down);
3576EXPORT_SYMBOL(follow_up);
3577EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
3578EXPORT_SYMBOL(getname);
3579EXPORT_SYMBOL(lock_rename);
3580EXPORT_SYMBOL(lookup_one_len);
3581EXPORT_SYMBOL(page_follow_link_light);
3582EXPORT_SYMBOL(page_put_link);
3583EXPORT_SYMBOL(page_readlink);
3584EXPORT_SYMBOL(__page_symlink);
3585EXPORT_SYMBOL(page_symlink);
3586EXPORT_SYMBOL(page_symlink_inode_operations);
3587EXPORT_SYMBOL(kern_path);
3588EXPORT_SYMBOL(vfs_path_lookup);
3589EXPORT_SYMBOL(inode_permission);
3590EXPORT_SYMBOL(unlock_rename);
3591EXPORT_SYMBOL(vfs_create);
3592EXPORT_SYMBOL(vfs_follow_link);
3593EXPORT_SYMBOL(vfs_link);
3594EXPORT_SYMBOL(vfs_mkdir);
3595EXPORT_SYMBOL(vfs_mknod);
3596EXPORT_SYMBOL(generic_permission);
3597EXPORT_SYMBOL(vfs_readlink);
3598EXPORT_SYMBOL(vfs_rename);
3599EXPORT_SYMBOL(vfs_rmdir);
3600EXPORT_SYMBOL(vfs_symlink);
3601EXPORT_SYMBOL(vfs_unlink);
3602EXPORT_SYMBOL(dentry_unhash);
3603EXPORT_SYMBOL(generic_readlink);