/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
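/*
 * Illustrative sketch (not part of this file): the two hash helpers above
 * drop the low pointer bits (every mount/dentry is at least a cache line
 * apart, so those bits carry no information) and fold the high bits down
 * with "tmp + (tmp >> shift)".  A minimal userspace model of the same
 * folding, with made-up table parameters, guarded with #if 0 so it never
 * enters a kernel build:
 */
#if 0
#include <stdio.h>

#define L1_CACHE_BYTES	64			/* assumption: typical cache line */

static unsigned long toy_hash_shift = 10;	/* hypothetical: 1024 buckets */
static unsigned long toy_hash_mask = 1023;

static unsigned long toy_m_hash(void *mnt, void *dentry)
{
	unsigned long tmp = (unsigned long)mnt / L1_CACHE_BYTES;
	tmp += (unsigned long)dentry / L1_CACHE_BYTES;
	tmp = tmp + (tmp >> toy_hash_shift);	/* fold high bits into low */
	return tmp & toy_hash_mask;
}

int main(void)
{
	int a, b;	/* stand-ins for a vfsmount and a dentry */
	printf("bucket = %lu\n", toy_m_hash(&a, &b));
	return 0;
}
#endif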
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
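/*
 * Illustrative sketch (not part of this file): mnt_add_count() touches only
 * the local CPU's slot, and mnt_get_count() recovers the true value by
 * summing every slot -- an individual slot may even be negative when a
 * reference taken on one CPU is dropped on another.  A toy userspace
 * analogue, with a plain array standing in for the per-CPU allocation,
 * kept under #if 0:
 */
#if 0
#include <stdio.h>

#define NR_TOY_CPUS 4	/* assumption: fixed CPU count for the demo */

static int toy_count[NR_TOY_CPUS];

static void toy_add_count(int cpu, int n)
{
	toy_count[cpu] += n;	/* the kernel uses this_cpu_add() instead */
}

static int toy_get_count(void)
{
	int cpu, sum = 0;
	for (cpu = 0; cpu < NR_TOY_CPUS; cpu++)
		sum += toy_count[cpu];
	return sum;
}

int main(void)
{
	toy_add_count(0, 1);	/* grab a reference on CPU 0 */
	toy_add_count(2, -1);	/* drop it on CPU 2: slot 2 goes negative */
	printf("sum = %d\n", toy_get_count());	/* prints 0, the true count */
	return 0;
}
#endif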
static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, __mnt_drop_write() must be
 * called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EBUSY;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
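/*
 * Illustrative sketch (not part of this file): callers bracket each
 * modification with mnt_want_write()/mnt_drop_write() so that the write
 * counter and freeze accounting above stay balanced.  The helper name
 * below is hypothetical; only the pairing pattern is the point.  Guarded
 * with #if 0 so it never enters a build.
 */
#if 0
static int example_touch_object(struct path *path)	/* hypothetical helper */
{
	int err = mnt_want_write(path->mnt);	/* may fail, e.g. -EBUSY */
	if (err)
		return err;
	/* ... modify the filesystem object here ... */
	mnt_drop_write(path->mnt);		/* must be paired unconditionally */
	return 0;
}
#endif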
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EBUSY;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__mnt_drop_write);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}
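/*
 * Illustrative sketch (not part of this file): the MNT_WRITE_HOLD dance in
 * __mnt_want_write()/mnt_make_readonly() above is a small two-phase
 * handshake -- writers increment first and then wait while the flag is
 * held; the remounter sets the flag, sums the counters, and flips to
 * read-only only if the sum is zero.  A simplified userspace approximation
 * using C11 atomics (the kernel relies on explicit smp_mb()/smp_wmb()
 * instead), kept under #if 0:
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int writers;
static atomic_bool write_hold, readonly;

static bool toy_want_write(void)
{
	atomic_fetch_add(&writers, 1);		/* publish intent first */
	while (atomic_load(&write_hold))	/* wait out the remounter */
		;
	if (atomic_load(&readonly)) {		/* decided while we were held */
		atomic_fetch_sub(&writers, 1);
		return false;
	}
	return true;
}

static bool toy_make_readonly(void)
{
	bool ok;

	atomic_store(&write_hold, true);	/* stall new writers */
	ok = (atomic_load(&writers) == 0);	/* safe: count is frozen */
	if (ok)
		atomic_store(&readonly, true);	/* visible before unhold */
	atomic_store(&write_hold, false);
	return ok;
}

int main(void)
{
	return (toy_make_readonly() && !toy_want_write()) ? 0 : 1;
}
#endif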
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct mount *p, *res = NULL;
	p = __lookup_mnt(mnt, dentry);
	if (!p)
		goto out;
	if (!(p->mnt.mnt_flags & MNT_UMOUNT))
		res = p;
	hlist_for_each_entry_continue(p, mnt_hash) {
		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
			break;
		if (!(p->mnt.mnt_flags & MNT_UMOUNT))
			res = p;
	}
out:
	return res;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
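/*
 * Illustrative sketch (not part of this file): lookup_mnt() is the classic
 * seqlock read loop -- sample the sequence, do the lockless lookup, retry
 * if a writer moved the tree underneath us; legitimize_mnt() folds the
 * "did we race" check together with taking a reference.  A self-contained
 * userspace model of the retry shape, kept under #if 0:
 */
#if 0
#include <stdatomic.h>

static atomic_uint seqcount;	/* even = stable, odd = writer active */
static int shared_value;

static unsigned read_begin(void)
{
	unsigned s;
	while ((s = atomic_load(&seqcount)) & 1)	/* writer in progress */
		;
	return s;
}

static int read_retry(unsigned s)
{
	return atomic_load(&seqcount) != s;	/* sequence moved => retry */
}

static int read_value(void)
{
	unsigned s;
	int v;
	do {
		s = read_begin();
		v = shared_value;	/* the lockless "lookup" step */
	} while (read_retry(s));	/* kernel: legitimize_mnt(m, seq) */
	return v;
}

int main(void)
{
	shared_value = 42;
	return read_value() == 42 ? 0 : 1;
}
#endif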
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;
	int ret;

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	hlist_add_head(&mp->m_hash, chain);
	INIT_HLIST_HEAD(&mp->m_list);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

static void attach_shadowed(struct mount *mnt,
			struct mount *parent,
			struct mount *shadows)
{
	if (shadows) {
		hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
		list_add(&mnt->mnt_child, &shadows->mnt_child);
	} else {
		hlist_add_head_rcu(&mnt->mnt_hash,
				m_hash(&parent->mnt, mnt->mnt_mountpoint));
		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	attach_shadowed(mnt, parent, shadows);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
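/*
 * Illustrative sketch (not part of this file): next_mnt() walks the mount
 * tree depth-first using only the mnt_mounts/mnt_child links -- descend to
 * the first child if there is one, otherwise climb until a sibling exists,
 * stopping at @root.  The same iteration over a toy first-child /
 * next-sibling tree, kept under #if 0:
 */
#if 0
#include <stddef.h>

struct toy_node {
	struct toy_node *parent, *first_child, *next_sibling;
};

static struct toy_node *toy_next(struct toy_node *p, struct toy_node *root)
{
	if (p->first_child)			/* descend */
		return p->first_child;
	while (1) {
		if (p == root)			/* whole tree visited */
			return NULL;
		if (p->next_sibling)		/* resume at a sibling */
			return p->next_sibling;
		p = p->parent;			/* ascend and keep looking */
	}
}

int main(void)
{
	struct toy_node root = {0}, a = {0}, b = {0};
	struct toy_node *p;
	int n = 0;

	root.first_child = &a;
	a.parent = &root;
	a.next_sibling = &b;
	b.parent = &root;

	for (p = toy_next(&root, &root); p; p = toy_next(p, &root))
		n++;
	return n == 2 ? 0 : 1;	/* visits exactly the two children */
}
#endif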
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
	}
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);
struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

struct vfsmount *mnt_clone_internal(struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */
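/*
 * Illustrative sketch (not part of this file): the seq_file operations
 * above are what back the per-namespace mount tables under /proc; from
 * userspace the same table is usually consumed through getmntent(3).
 * A minimal reader, kept under #if 0:
 */
#if 0
#include <stdio.h>
#include <mntent.h>

int main(void)
{
	FILE *f = setmntent("/proc/self/mounts", "r");	/* per-namespace view */
	struct mntent *e;

	if (!f)
		return 1;
	while ((e = getmntent(f)) != NULL)
		printf("%s on %s type %s (%s)\n",
		       e->mnt_fsname, e->mnt_dir, e->mnt_type, e->mnt_opts);
	endmntent(f);
	return 0;
}
#endif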
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	lock_mount_hash();
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	unlock_mount_hash();
	put_mountpoint(mp);
out_unlock:
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}
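/*
 * Illustrative sketch (not part of this file): the syscall above is reached
 * from userspace via umount2(2); MNT_DETACH selects the lazy path and
 * UMOUNT_NOFOLLOW suppresses the LOOKUP_FOLLOW seen above.  The target
 * path below is made up.  Kept under #if 0:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* lazy unmount: detach now, clean up when the last user goes away */
	if (umount2("/mnt/example", MNT_DETACH) < 0) {
		perror("umount2");
		return 1;
	}
	return 0;
}
#endif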
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			struct mount *t = NULL;
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			mnt_set_mountpoint(parent, p->mnt_mp, q);
			if (!list_empty(&parent->mnt_mounts)) {
				t = list_last_entry(&parent->mnt_mounts,
					struct mount, mnt_child);
				if (t->mnt_mp != p->mnt_mp)
					t = NULL;
			}
			attach_shadowed(q, parent, t);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}

/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new will
 * not be attached anywhere in the namespace and will be private (i.e. changes
 * to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	down_read(&namespace_sem);
	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
EXPORT_SYMBOL(iterate_mounts);
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}
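/*
 * Illustrative sketch (not part of this file): count_mounts() rejects the
 * request when any intermediate sum would wrap, not only when the limit is
 * plainly exceeded.  The same overflow-safe shape in isolation, kept under
 * #if 0:
 */
#if 0
/* returns 0 if "mounts" more entries fit under "max", -1 otherwise */
static int toy_fits(unsigned int old, unsigned int pending,
		    unsigned int mounts, unsigned int max)
{
	unsigned int sum = old + pending;

	if (old > sum || pending > sum)	/* old + pending wrapped around */
		return -1;
	if (max < sum)			/* already over the limit */
		return -1;
	if (mounts > max - sum)		/* no room left (max - sum can't wrap) */
		return -1;
	return 0;
}

int main(void)
{
	/* 90000 + 5000 existing, 6000 more requested, cap 100000: rejected */
	return toy_fits(90000, 5000, 6000, 100000) == -1 ? 0 : 1;
}
#endif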
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: in the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above is
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Is there space to add these mounts to the mount namespace? */
	if (!parent_path) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt, NULL);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt_last(&child->mnt_parent->mnt,
				      child->mnt_mountpoint);
		commit_tree(child, q);
	}
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;
	return err;
}
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = lookup_mountpoint(dentry);
		if (!mp)
			mp = new_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;
	put_mountpoint(where);
	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}

static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
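/*
 * Illustrative sketch (not part of this file): do_change_type() services
 * mount(2) calls whose flags carry only a propagation type, which is what
 * "mount --make-rshared /" issues.  The equivalent direct call, kept under
 * #if 0:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* recursively mark the tree under / as shared */
	if (mount(NULL, "/", NULL, MS_REC | MS_SHARED, NULL) < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
#endif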
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
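/*
 * Illustrative sketch (not part of this file): do_loopback() implements
 * MS_BIND; adding MS_REC selects the copy_tree() branch above ("rbind").
 * From userspace, with made-up paths, kept under #if 0:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/srv/data", "/mnt/view", NULL, MS_BIND, NULL) < 0) {
		perror("bind mount");
		return 1;
	}
	return 0;
}
#endif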
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		/* Was the nodev implicitly added in mount? */
		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			mnt_flags |= MNT_NODEV;
		} else {
			return -EPERM;
		}
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}
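/*
 * Illustrative sketch (not part of this file): a remount from userspace
 * takes the MS_REMOUNT path above; adding MS_BIND changes only the
 * per-mountpoint flags via change_mount_flags() instead of touching the
 * superblock.  The target path is made up.  Kept under #if 0:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* make just this mountpoint read-only, not the whole superblock */
	if (mount(NULL, "/mnt/example", NULL,
		  MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) {
		perror("remount");
		return 1;
	}
	return 0;
}
#endif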
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
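/*
 * Illustrative sketch (not part of this file): do_move_mount() backs
 * MS_MOVE ("mount --move").  Both paths below are made up; the source must
 * be a mount root whose parent is not shared, per the checks above.  Kept
 * under #if 0:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/mnt/old_place", "/mnt/new_place", NULL, MS_MOVE, NULL) < 0) {
		perror("move mount");
		return 1;
	}
	return 0;
}
#endif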
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}

static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			flags |= MS_NODEV;
			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
		}
		if (type->fs_flags & FS_USERNS_VISIBLE) {
			if (!fs_fully_visible(type, &mnt_flags)) {
				put_filesystem(type);
				return -EPERM;
			}
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}
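/*
 * Illustrative sketch (not part of this file): a plain new mount from
 * userspace ends up in do_new_mount(), with "fstype" naming the filesystem
 * and "data" carrying the option string that the filesystem itself parses.
 * The target path is made up.  Kept under #if 0:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* mount a fresh tmpfs instance */
	if (mount("none", "/mnt/scratch", "tmpfs", 0, "size=16m") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
#endif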
2475 int finish_automount(struct vfsmount
*m
, struct path
*path
)
2477 struct mount
*mnt
= real_mount(m
);
2479 /* The new mount record should have at least 2 refs to prevent it being
2480 * expired before we get a chance to add it
2482 BUG_ON(mnt_get_count(mnt
) < 2);
2484 if (m
->mnt_sb
== path
->mnt
->mnt_sb
&&
2485 m
->mnt_root
== path
->dentry
) {
2490 err
= do_add_mount(mnt
, path
, path
->mnt
->mnt_flags
| MNT_SHRINKABLE
);
2494 /* remove m from any expiration list it may be on */
2495 if (!list_empty(&mnt
->mnt_expire
)) {
2497 list_del_init(&mnt
->mnt_expire
);
2506 * mnt_set_expiry - Put a mount on an expiration list
2507 * @mnt: The mount to list.
2508 * @expiry_list: The list to add the mount to.
2510 void mnt_set_expiry(struct vfsmount
*mnt
, struct list_head
*expiry_list
)
2514 list_add_tail(&real_mount(mnt
)->mnt_expire
, expiry_list
);
2518 EXPORT_SYMBOL(mnt_set_expiry
);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
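/*
 * Usage sketch (illustrative; the identifiers are hypothetical, but this
 * mirrors how automounting filesystems such as AFS drive this API): the
 * ->d_automount() implementation puts each submount it creates on a
 * private list with mnt_set_expiry(), and a periodic worker reaps it:
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	static void example_reaper(struct work_struct *work)
 *	{
 *		// first pass marks, second pass unmounts what stayed idle
 *		mark_mounts_for_expiry(&example_automount_list);
 *	}
 *
 * A mount survives the first pass (which only sets mnt_expiry_mark) and
 * is unmounted on the second, unless someone used it in between, since
 * mntput() clears the mark while the mount is still in use.
 */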
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}
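/*
 * Worked example (illustrative): if userspace passes data located 100
 * bytes below TASK_SIZE, only those 100 addressable bytes may be copied,
 * so size starts at 100 rather than PAGE_SIZE; exact_copy_from_user()
 * copies at most that much and the memset() above zeroes the remaining
 * PAGE_SIZE - i bytes, so no stale page contents leak to the filesystem
 * that later parses the options page.
 */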
char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PAGE_SIZE) : NULL;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char __user *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = user_path(dir_name, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
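/*
 * Illustrative example (userspace, not part of this file): a call such as
 *
 *	mount("proc", "/proc", "proc",
 *	      MS_NOSUID | MS_NODEV | MS_NOEXEC, NULL);
 *
 * reaches do_new_mount() with mnt_flags = MNT_NOSUID | MNT_NODEV |
 * MNT_NOEXEC | MNT_RELATIME (relatime being the default above, since
 * MS_NOATIME was not passed), while MS_BIND, MS_MOVE, MS_REMOUNT and the
 * propagation-type flags would each have been dispatched to their own
 * helper instead.
 */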
static void free_mnt_ns(struct mnt_namespace *ns)
{
	ns_free_inum(&ns->ns);
	put_user_ns(ns->user_ns);
	kfree(ns);
}
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops.  Even at one
 * increment per nanosecond a 64bit counter takes roughly 584 years to
 * wrap, and namespaces are created far more slowly than that, so we
 * can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	int ret;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	ret = ns_alloc_inum(&new_ns->ns);
	if (ret) {
		kfree(new_ns);
		return ERR_PTR(ret);
	}
	new_ns->ns.ops = &mntns_operations;
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	new_ns->mounts = 0;
	new_ns->pending_mounts = 0;
	return new_ns;
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		new_ns->mounts++;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		new_ns->mounts++;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	unsigned long data_page;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
		       (void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}
int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	unsigned u;
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				0,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				0,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	for (u = 0; u <= m_hash_mask; u++)
		INIT_HLIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u <= mp_hash_mask; u++)
		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	if (unlikely(!ns))
		return false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* Read the mount flags and filter out flags that
		 * may safely be ignored.
		 */
		mnt_flags = mnt->mnt.mnt_flags;
		if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
			mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);

		/* Don't miss readonly hidden in the superblock flags */
		if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_NODEV) &&
		    !(new_flags & MNT_NODEV))
			continue;
		if ((mnt_flags & MNT_LOCK_NOSUID) &&
		    !(new_flags & MNT_NOSUID))
			continue;
		if ((mnt_flags & MNT_LOCK_NOEXEC) &&
		    !(new_flags & MNT_NOEXEC))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
					       MNT_LOCK_NODEV    | \
					       MNT_LOCK_NOSUID   | \
					       MNT_LOCK_NOEXEC   | \
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}
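/*
 * Illustrative example (not from this file): a container runtime
 * typically overmounts paths such as /proc/sys with locked read-only
 * mounts.  Those children cover non-empty directories, so an existing
 * proc instance with such overmounts is not "fully visible"; a fresh
 * "mount -t proc" from inside the container's user namespace then finds
 * no fully visible candidate and is refused, which prevents the
 * overmounts from being bypassed by simply mounting proc again.
 */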
bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid.  This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}
static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while (d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,