/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 * Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * towards the root.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
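/*
 * Editor's note: both hash functions divide the raw pointers by
 * L1_CACHE_BYTES before mixing because the allocations are cache-line
 * aligned, so the low bits carry no entropy. m_hash() keys on the
 * (parent vfsmount, mountpoint dentry) pair -- exactly the pair that
 * __lookup_mnt() below searches by.
 */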
static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
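/*
 * Editor's note: peer group IDs are allocated with a minimum of 1
 * (ida_alloc_min(&mnt_group_ida, 1, ...)), so the value 0 can safely
 * mean "not in any peer group"; clone_mnt() and invent_group_ids()
 * below rely on that sentinel.
 */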
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
		if (!mnt->mnt_devname)
			goto out_free_id;

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
300 * Most r/o & frozen checks on a fs are for operations that take discrete
301 * amounts of time, like a write() or unlink(). We must keep track of when
302 * those operations start (for permission checks) and when they end, so that we
303 * can determine when writes are able to occur to a filesystem.
306 * __mnt_want_write - get write access to a mount without freeze protection
307 * @m: the mount on which to take a write
309 * This tells the low-level filesystem that a write is about to be performed to
310 * it, and makes sure that writes are allowed (mnt it read-write) before
311 * returning success. This operation does not protect against filesystem being
312 * frozen. When the write operation is finished, __mnt_drop_write() must be
313 * called. This is effectively a refcount.
315 int __mnt_want_write(struct vfsmount
*m
)
317 struct mount
*mnt
= real_mount(m
);
321 mnt_inc_writers(mnt
);
323 * The store to mnt_inc_writers must be visible before we pass
324 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
325 * incremented count after it has set MNT_WRITE_HOLD.
328 while (READ_ONCE(mnt
->mnt
.mnt_flags
) & MNT_WRITE_HOLD
)
331 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
332 * be set to match its requirements. So we must not load that until
333 * MNT_WRITE_HOLD is cleared.
336 if (mnt_is_readonly(m
)) {
337 mnt_dec_writers(mnt
);
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
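/*
 * Editor's sketch of the pairing contract (hypothetical caller, not
 * part of this file): every successful mnt_want_write() must be
 * balanced by mnt_drop_write() once the modification is done, e.g.:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... create/unlink/write under path ...
 *	mnt_drop_write(path->mnt);
 */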
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static int __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
	return 0;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dentry;
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);

	/*
	 * Safely avoid even the suggestion this code might sleep or
	 * lock the mount hash by taking advantage of the knowledge that
	 * mnt_change_mountpoint will not release the final reference
	 * to a mountpoint.
	 *
	 * During mounting, the mount passed in as the parent mount will
	 * continue to use the old mountpoint and during unmounting, the
	 * old mountpoint will continue to exist until namespace_unlock,
	 * which happens well after mnt_change_mountpoint.
	 */
	spin_lock(&old_mountpoint->d_lock);
	old_mountpoint->d_lockref.count--;
	spin_unlock(&old_mountpoint->d_lock);

	mnt_add_count(old_parent, -1);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb		= fc->root->d_sb;
	mnt->mnt.mnt_root	= dget(fc->root);
	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
	mnt->mnt_parent		= mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
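/*
 * Editor's sketch (hypothetical in-kernel caller): the fs_context API
 * is typically used as create-context / configure / mount, which is
 * exactly what vfs_kern_mount() below does:
 *
 *	fc = fs_context_for_mount(type, 0);
 *	if (IS_ERR(fc))
 *		return ERR_CAST(fc);
 *	ret = vfs_parse_fs_string(fc, "source", name, strlen(name));
 *	mnt = ret ? ERR_PTR(ret) : fc_mount(fc);
 *	put_fs_context(fc);
 */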
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
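/*
 * Editor's sketch (hypothetical caller): mntget()/mntput() are the
 * basic reference pair, and both tolerate a NULL vfsmount:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	... use m without any lock held ...
 *	mntput(m);
 */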
/* path_is_mountpoint() - Check if path is a mount in the current
 *                        namespace.
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */
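/*
 * Editor's note: m_start() caches the last (event, mount, index)
 * triple so that sequential reads of /proc/*/mounts resume in O(1)
 * instead of rewalking the namespace list from the head for every
 * chunk; any mount activity bumps ns->event and invalidates the cache.
 */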
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
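/*
 * Editor's note: MNT_DETACH is the lazy umount -- the tree is unhooked
 * immediately (UMOUNT_PROPAGATE only) and each mount lingers until its
 * last user goes away -- whereas a plain umount also passes UMOUNT_SYNC
 * and fails with -EBUSY if anything in the propagation tree is busy.
 */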
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static inline bool may_mandlock(void)
{
#ifndef	CONFIG_MANDATORY_FILE_LOCKING
	return false;
#endif
	return capable(CAP_SYS_ADMIN);
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 * emulation.
 */
int ksys_umount(char __user *name, int flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	lookup_flags |= LOOKUP_NO_EVAL;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new will
 * not be attached anywhere in the namespace and will be private (i.e. changes
 * to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
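/*
 * Editor's sketch (hypothetical caller): a private clone is the usual
 * way to keep long-term access to a subtree without being affected by
 * later mount activity:
 *
 *	struct vfsmount *m = clone_private_mount(&path);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	... access files via m and path.dentry's descendants ...
 *	mntput(m);
 */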
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire))
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!parent_path) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		/* Notice when we are propagating across user namespaces */
		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
			lock_mnt_tree(child);
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	inode_lock(dentry->d_inode);
	if (unlikely(cant_mount(dentry))) {
		inode_unlock(dentry->d_inode);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	inode_unlock(path->dentry->d_inode);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}

static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}
/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
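/*
 * Editor's example: "mount --make-rshared /" arrives here as
 * ms_flags == MS_SHARED | MS_REC; after masking off MS_REC the result
 * is the single bit MS_SHARED, which is_power_of_2() accepts, whereas
 * an ambiguous MS_SHARED | MS_SLAVE combination is rejected.
 */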
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}

static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
/*
 * Don't allow locked mount flags to be cleared.
 *
 * No locks need to be held here while testing the various MNT_LOCK
 * flags because those flags can never be cleared once they are set.
 */
static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
{
	unsigned int fl = mnt->mnt.mnt_flags;

	if ((fl & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY))
		return false;

	if ((fl & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV))
		return false;

	if ((fl & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID))
		return false;

	if ((fl & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC))
		return false;

	if ((fl & MNT_LOCK_ATIME) &&
	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
		return false;

	return true;
}

static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
{
	bool readonly_request = (mnt_flags & MNT_READONLY);

	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
		return 0;

	if (readonly_request)
		return mnt_make_readonly(mnt);

	return __mnt_unmake_readonly(mnt);
}
/*
 * Update the user-settable attributes on a mount.  The caller must hold
 * sb->s_umount for writing.
 */
static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
	lock_mount_hash();
	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
	mnt->mnt.mnt_flags = mnt_flags;
	touch_mnt_namespace(mnt->mnt_ns);
	unlock_mount_hash();
}

/*
 * Handle reconfiguration of the mountpoint only without alteration of the
 * superblock it refers to.  This is triggered by specifying MS_REMOUNT|MS_BIND
 * to mount(2).
 */
static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
{
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != mnt->mnt.mnt_root)
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	down_write(&sb->s_umount);
	ret = change_mount_ro_state(mnt, mnt_flags);
	if (ret == 0)
		set_mount_attributes(mnt, mnt_flags);
	up_write(&sb->s_umount);
	return ret;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int ms_flags, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	struct fs_context *fc;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	err = parse_monolithic_mount_data(fc, data);
	if (!err) {
		down_write(&sb->s_umount);
		err = -EPERM;
		if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
			err = reconfigure_super(fc);
			if (!err)
				set_mount_attributes(mnt, mnt_flags);
		}
		up_write(&sb->s_umount);
	}
	put_fs_context(fc);
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
*path
, const char *old_name
)
2431 struct path old_path
, parent_path
;
2434 struct mountpoint
*mp
;
2436 if (!old_name
|| !*old_name
)
2438 err
= kern_path(old_name
, LOOKUP_FOLLOW
, &old_path
);
2442 mp
= lock_mount(path
);
2447 old
= real_mount(old_path
.mnt
);
2448 p
= real_mount(path
->mnt
);
2451 if (!check_mnt(p
) || !check_mnt(old
))
2454 if (old
->mnt
.mnt_flags
& MNT_LOCKED
)
2458 if (old_path
.dentry
!= old_path
.mnt
->mnt_root
)
2461 if (!mnt_has_parent(old
))
2464 if (d_is_dir(path
->dentry
) !=
2465 d_is_dir(old_path
.dentry
))
2468 * Don't move a mount residing in a shared parent.
2470 if (IS_MNT_SHARED(old
->mnt_parent
))
2473 * Don't move a mount tree containing unbindable mounts to a destination
2474 * mount which is shared.
2476 if (IS_MNT_SHARED(p
) && tree_contains_unbindable(old
))
2479 for (; mnt_has_parent(p
); p
= p
->mnt_parent
)
2483 err
= attach_recursive_mnt(old
, real_mount(path
->mnt
), mp
, &parent_path
);
2487 /* if the mount is moved, it should no longer be expire
2489 list_del_init(&old
->mnt_expire
);
2494 path_put(&parent_path
);
2495 path_put(&old_path
);
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
/*
 * Create a new mount using a superblock configuration and request it
 * be added to the namespace tree.
 */
static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
			   unsigned int mnt_flags)
{
	struct vfsmount *mnt;
	struct super_block *sb = fc->root->d_sb;
	int error;

	error = security_sb_kern_mount(sb);
	if (!error && mount_too_revealing(sb, &mnt_flags))
		error = -EPERM;

	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	up_write(&sb->s_umount);

	mnt = vfs_create_mount(fc);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
	if (error < 0)
		mntput(mnt);
	return error;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct fs_context *fc;
	const char *subtype = NULL;
	int err = 0;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (type->fs_flags & FS_HAS_SUBTYPE) {
		subtype = strchr(fstype, '.');
		if (subtype) {
			subtype++;
			if (!*subtype) {
				put_filesystem(type);
				return -EINVAL;
			}
		}
	}

	fc = fs_context_for_mount(type, sb_flags);
	put_filesystem(type);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	if (subtype)
		err = vfs_parse_fs_string(fc, "subtype",
					  subtype, strlen(subtype));
	if (!err && name)
		err = vfs_parse_fs_string(fc, "source", name, strlen(name));
	if (!err)
		err = parse_monolithic_mount_data(fc, data);
	if (!err)
		err = vfs_get_tree(fc);
	if (!err)
		err = do_new_mount_fc(fc, path, mnt_flags);

	put_fs_context(fc);
	return err;
}
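
/*
 * Illustrative only (userspace): a fresh mount, e.g.
 *
 *	mount("tmpfs", "/mnt", "tmpfs", 0, "size=16m");
 *
 * For FS_HAS_SUBTYPE filesystems the type string may carry a subtype after
 * a dot ("fuse.sshfs"); it is split off above and fed back as the "subtype"
 * option.
 */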
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;

	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}
/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
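
/*
 * Illustrative only: an ->d_automount() implementation typically does
 *
 *	mnt = vfs_submount(...);
 *	mnt_set_expiry(mnt, &my_automount_list);
 *
 * (my_automount_list being the filesystem's own list) so that the new
 * submount can later be culled by mark_mounts_for_expiry() below.
 */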
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
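
/*
 * Sketch of a caller (hypothetical names; mirrors how in-tree automounters
 * such as AFS and NFS drive expiry from delayed work):
 *
 *	static LIST_HEAD(my_automount_list);
 *	static struct delayed_work my_expiry_work;
 *
 *	static void my_expiry_worker(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&my_automount_list);
 *		schedule_delayed_work(&my_expiry_work, 10 * HZ);
 *	}
 *
 * A mount is culled only if still unused on the second consecutive pass,
 * since the first pass merely sets the expiry mark.
 */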
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
void *copy_mount_options(const void __user * data)
{
	unsigned long i;
	unsigned long size;
	char *copy;

	if (!data)
		return NULL;

	copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user(copy, data, size);
	if (!i) {
		kfree(copy);
		return ERR_PTR(-EFAULT);
	}
	if (i != PAGE_SIZE)
		memset(copy + i, 0, PAGE_SIZE - i);
	return copy;
}
char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PATH_MAX) : NULL;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
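
/*
 * Illustrative only (userspace): a legacy caller passing the old magic, e.g.
 *
 *	mount("/dev/sda1", "/mnt", "ext4", 0xC0ED0000 | MS_RDONLY, NULL);
 *
 * behaves exactly like one passing plain MS_RDONLY, because the top half
 * matches MS_MGC_VAL and is stripped at the start of do_mount() below.
 */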
long do_mount(const char *dev_name, const char __user *dir_name,
	      const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	unsigned int mnt_flags = 0, sb_flags;
	int retval = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	if (flags & MS_NOUSER)
		return -EINVAL;

	/* ... and get the mountpoint */
	retval = user_path(dir_name, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (!retval && (flags & SB_MANDLOCK) && !may_mandlock())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	sb_flags = flags & (SB_RDONLY |
			    SB_SYNCHRONOUS |
			    SB_MANDLOCK |
			    SB_DIRSYNC |
			    SB_SILENT |
			    SB_POSIXACL |
			    SB_LAZYTIME |
			    SB_I_VERSION);

	if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
		retval = do_reconfigure_mnt(&path, mnt_flags);
	else if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags, sb_flags, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, sb_flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
}

static void dec_mnt_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
}
static void free_mnt_ns(struct mnt_namespace *ns)
{
	if (!is_anon_ns(ns))
		ns_free_inum(&ns->ns);
	dec_mnt_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	kfree(ns);
}
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops.  A 64bit
 * number incrementing at 10GHz would still take over 58 years to wrap,
 * and real namespace creation rates are many orders of magnitude lower,
 * so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
{
	struct mnt_namespace *new_ns;
	struct ucounts *ucounts;
	int ret;

	ucounts = inc_mnt_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns) {
		dec_mnt_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}
	if (!anon) {
		ret = ns_alloc_inum(&new_ns->ns);
		if (ret) {
			kfree(new_ns);
			dec_mnt_namespaces(ucounts);
			return ERR_PTR(ret);
		}
	}
	new_ns->ns.ops = &mntns_operations;
	if (!anon)
		new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->user_ns = get_user_ns(user_ns);
	new_ns->ucounts = ucounts;
	return new_ns;
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns, false);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	if (user_ns != ns->user_ns) {
		lock_mount_hash();
		lock_mnt_tree(new);
		unlock_mount_hash();
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		new_ns->mounts++;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *m, const char *name)
{
	struct mount *mnt = real_mount(m);
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = alloc_mnt_ns(&init_user_ns, true);
	if (IS_ERR(ns)) {
		mntput(m);
		return ERR_CAST(ns);
	}
	mnt->mnt_ns = ns;
	ns->root = mnt;
	ns->mounts++;
	list_add(&mnt->mnt_list, &ns->list);

	err = vfs_path_lookup(m->mnt_root, m,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
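
/*
 * Illustrative only (kernel-side): callers such as NFS referral handling
 * use this to walk a just-built tree down to the point of interest:
 *
 *	struct dentry *root = mount_subtree(mnt, "/export/home");
 *
 * On success the caller is left holding an active superblock reference
 * and s_umount; the vfsmount reference passed in is always consumed.
 */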
int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
	       unsigned long flags, void __user *data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	void *options;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	options = copy_mount_options(data);
	ret = PTR_ERR(options);
	if (IS_ERR(options))
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);

	kfree(options);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	return ksys_mount(dev_name, dir_name, type, flags, data);
}
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}
bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
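
/*
 * Illustrative only (hypothetical caller): unlike a string comparison,
 * this follows the mount tree, so e.g.
 *
 *	if (path_is_under(&file_path, &confined_root))
 *		...
 *
 * gives the right answer even across bind mounts stacked on the same
 * directory.
 */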
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
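
/*
 * Illustrative only (userspace, container-runtime style sequence):
 *
 *	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *	chdir("/newroot");
 *	mkdir("oldroot", 0700);
 *	syscall(SYS_pivot_root, ".", "oldroot");
 *	chroot(".");
 *	umount2("/oldroot", MNT_DETACH);
 *
 * The initial bind mount makes new_root a mountpoint, per the note above;
 * the old root stays reachable at /oldroot until it is detached.
 */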
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	put_mountpoint(root_mp);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	m->mnt_ns = ns;
	ns->root = m;
	ns->mounts = 1;
	list_add(&m->mnt_list, &ns->list);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);
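
/*
 * Illustrative only (kernel-side, hypothetical names): subsystems pin an
 * internal instance of a filesystem for their own use, e.g.
 *
 *	static struct vfsmount *my_mnt;
 *
 *	my_mnt = kern_mount(&my_fs_type);
 *	if (IS_ERR(my_mnt))
 *		return PTR_ERR(my_mnt);
 *
 * and release it with kern_unmount(my_mnt) when the subsystem goes away.
 */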
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}
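
/*
 * Illustrative only (userspace): this check is what stops e.g.
 *
 *	unshare(CLONE_NEWUSER | CLONE_NEWNS);
 *	mount("proc", "/proc", "proc", 0, NULL);
 *
 * from revealing paths that were deliberately covered by locked mounts in
 * the parent namespace: the fresh mount is refused unless an equally
 * restricted instance is already fully visible.
 */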
bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid.  This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}
static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}
const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};