// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>

#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
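
/*
 * Usage note (illustrative, not part of the original file): the two
 * __setup() handlers above consume kernel command-line options, e.g.
 * booting with "mhash_entries=1048576" sizes the mount hash table
 * explicitly instead of letting it scale with available memory.
 */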
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static u64 event;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
};

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
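
/*
 * Illustrative sketch (not kernel code): the same pointer-mixing hash as
 * m_hash() above, compiled as a standalone userspace program. The 64-byte
 * divisor, shift, and mask are assumptions standing in for L1_CACHE_BYTES
 * and the boot-time computed m_hash_shift/m_hash_mask.
 *
 *	#include <stdio.h>
 *
 *	#define CACHE_BYTES 64UL			// stand-in for L1_CACHE_BYTES
 *	#define HASH_SHIFT  7				// stand-in for m_hash_shift
 *	#define HASH_MASK   ((1UL << HASH_SHIFT) - 1)	// stand-in for m_hash_mask
 *
 *	static unsigned long m_hash_slot(const void *mnt, const void *dentry)
 *	{
 *		unsigned long tmp = (unsigned long)mnt / CACHE_BYTES;
 *
 *		tmp += (unsigned long)dentry / CACHE_BYTES;
 *		tmp = tmp + (tmp >> HASH_SHIFT);	// fold high bits down
 *		return tmp & HASH_MASK;			// index into the table
 *	}
 *
 *	int main(void)
 *	{
 *		int a, b;				// stack objects as fake pointers
 *
 *		printf("slot %lu\n", m_hash_slot(&a, &b));
 *		return 0;
 *	}
 */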
static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}
/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_userns = &init_user_ns;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
out_free_id:
#endif
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
317 * Most r/o & frozen checks on a fs are for operations that take discrete
318 * amounts of time, like a write() or unlink(). We must keep track of when
319 * those operations start (for permission checks) and when they end, so that we
320 * can determine when writes are able to occur to a filesystem.
323 * __mnt_want_write - get write access to a mount without freeze protection
324 * @m: the mount on which to take a write
326 * This tells the low-level filesystem that a write is about to be performed to
327 * it, and makes sure that writes are allowed (mnt it read-write) before
328 * returning success. This operation does not protect against filesystem being
329 * frozen. When the write operation is finished, __mnt_drop_write() must be
330 * called. This is effectively a refcount.
332 int __mnt_want_write(struct vfsmount
*m
)
334 struct mount
*mnt
= real_mount(m
);
338 mnt_inc_writers(mnt
);
340 * The store to mnt_inc_writers must be visible before we pass
341 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
342 * incremented count after it has set MNT_WRITE_HOLD.
345 while (READ_ONCE(mnt
->mnt
.mnt_flags
) & MNT_WRITE_HOLD
)
348 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
349 * be set to match its requirements. So we must not load that until
350 * MNT_WRITE_HOLD is cleared.
353 if (mnt_is_readonly(m
)) {
354 mnt_dec_writers(mnt
);
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with __mnt_drop_write_file.
 */
int __mnt_want_write_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return __mnt_want_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
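
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * canonical pairing callers are expected to follow around a directory
 * modification. example_write_pair() is a hypothetical helper; the
 * pattern is want -> write -> drop.
 *
 *	static int example_write_pair(struct path *path)
 *	{
 *		int err;
 *
 *		err = mnt_want_write(path->mnt);  // freeze protection + r/o check
 *		if (err)
 *			return err;
 *		// ... perform the modification here ...
 *		mnt_drop_write(path->mnt);	  // must balance mnt_want_write()
 *		return 0;
 *	}
 */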
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	struct user_namespace *mnt_userns;

	mnt_userns = mnt_user_ns(&mnt->mnt);
	if (mnt_userns != &init_user_ns)
		put_user_ns(mnt_userns);
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
static inline void lock_ns_list(struct mnt_namespace *ns)
{
	spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
	spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
	return mnt->mnt.mnt_flags & MNT_CURSOR;
}
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))
			continue;
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);

	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb		= fc->root->d_sb;
	mnt->mnt.mnt_root	= dget(fc->root);
	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
	mnt->mnt_parent		= mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_userns = mnt_user_ns(&old->mnt);
	if (mnt->mnt.mnt_userns != &init_user_ns)
		mnt->mnt.mnt_userns = get_user_ns(mnt->mnt.mnt_userns);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}
static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
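
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * get/put discipline for vfsmount references. Whoever stores a vfsmount
 * pointer long-term takes mntget() and must balance it with mntput().
 *
 *	static struct vfsmount *example_stash_mount(struct vfsmount *mnt)
 *	{
 *		struct vfsmount *stashed = mntget(mnt);	// +1 reference
 *
 *		// ... stashed stays valid across sleeps and unlocks ...
 *		return stashed;	// caller drops it with mntput(stashed)
 *	}
 */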
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
#ifdef CONFIG_PROC_FS
static struct mount *mnt_list_next(struct mnt_namespace *ns,
				   struct list_head *p)
{
	struct mount *mnt, *ret = NULL;

	lock_ns_list(ns);
	list_for_each_continue(p, &ns->list) {
		mnt = list_entry(p, typeof(*mnt), mnt_list);
		if (!mnt_is_cursor(mnt)) {
			ret = mnt;
			break;
		}
	}
	unlock_ns_list(ns);

	return ret;
}
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct list_head *prev;

	down_read(&namespace_sem);
	if (!*pos) {
		prev = &p->ns->list;
	} else {
		prev = &p->cursor.mnt_list;

		/* Read after we'd reached the end? */
		if (list_empty(prev))
			return NULL;
	}

	return mnt_list_next(p->ns, prev);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	++*pos;
	return mnt_list_next(p->ns, &mnt->mnt_list);
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	lock_ns_list(p->ns);
	if (mnt)
		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
	else
		list_del_init(&p->cursor.mnt_list);
	unlock_ns_list(p->ns);
	up_read(&namespace_sem);
}
static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
{
	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_del(&cursor->mnt_list);
	unlock_ns_list(ns);
	up_read(&namespace_sem);
}
#endif /* CONFIG_PROC_FS */
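
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the consumer of the seq_file iterator above -- reading the per-process
 * mounts file that mounts_op ultimately serves.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[4096];
 *		FILE *f = fopen("/proc/self/mounts", "r");
 *
 *		if (!f) {
 *			perror("fopen");
 *			return 1;
 *		}
 *		while (fgets(line, sizeof(line), f))	// one mount per line
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */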
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

#ifdef	CONFIG_MANDATORY_FILE_LOCKING
static inline bool may_mandlock(void)
{
	return capable(CAP_SYS_ADMIN);
}
#else
static inline bool may_mandlock(void)
{
	pr_warn("VFS: \"mand\" mount option not supported");
	return false;
}
#endif
static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
1748 int path_umount(struct path
*path
, int flags
)
1750 struct mount
*mnt
= real_mount(path
->mnt
);
1753 ret
= can_umount(path
, flags
);
1755 ret
= do_umount(mnt
, flags
);
1757 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
1759 mntput_no_expire(mnt
);
static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}
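
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * exercising the umount syscall flags handled above via the umount2()
 * wrapper. "/mnt/example" is a hypothetical mountpoint.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// MNT_DETACH requests a lazy unmount: detach now, clean up later
 *		if (umount2("/mnt/example", MNT_DETACH) != 0)
 *			perror("umount2");
 *		return 0;
 *	}
 */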
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
*dentry
)
1800 /* Is this a proxy for a mount namespace? */
1801 return dentry
->d_op
== &ns_dentry_operations
&&
1802 dentry
->d_fsdata
== &mntns_operations
;
1805 static struct mnt_namespace
*to_mnt_ns(struct ns_common
*ns
)
1807 return container_of(ns
, struct mnt_namespace
, ns
);
1810 struct ns_common
*from_mnt_ns(struct mnt_namespace
*mnt
)
1815 static bool mnt_ns_loop(struct dentry
*dentry
)
1817 /* Could bind mounting the mount namespace inode cause a
1818 * mount namespace loop?
1820 struct mnt_namespace
*mnt_ns
;
1821 if (!is_mnt_ns_file(dentry
))
1824 mnt_ns
= to_mnt_ns(get_proc_ns(dentry
->d_inode
));
1825 return current
->nsproxy
->mnt_ns
->seq
>= mnt_ns
->seq
;
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */
struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}
static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;
	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new
 * mount will not be attached anywhere in the namespace and will be private
 * (i.e. changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
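
/*
 * Illustrative sketch (an assumption, not code from this file): how a
 * stacking filesystem might use clone_private_mount() to get a detached,
 * propagation-free copy of a layer it was handed. example_clone_layer()
 * is hypothetical.
 *
 *	static struct vfsmount *example_clone_layer(const struct path *layer)
 *	{
 *		struct vfsmount *mnt = clone_private_mount(layer);
 *
 *		if (IS_ERR(mnt))
 *			return mnt;
 *		// ... lookups can use mnt without seeing later propagation ...
 *		return mnt;	// release with mntput()
 *	}
 */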
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire))
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			bool moving)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!moving) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (moving) {
		unhash_mnt(source_mnt);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		if (source_mnt->mnt_ns) {
			/* move from anon - the caller will destroy */
			list_del_init(&source_mnt->mnt_ns->list);
		}
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		/* Notice when we are propagating across user namespaces */
		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
			lock_mnt_tree(child);
		child->mnt.mnt_flags &= ~MNT_LOCKED;
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
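
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * driving the BIND table above from C. Binding under a shared destination
 * leaves the clone in the source's peer group. "/src" and "/dst" are
 * hypothetical; /dst must already be a mount point for MS_SHARED to apply.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// make /dst shared so bind clones propagate from it
 *		if (mount(NULL, "/dst", NULL, MS_SHARED, NULL) != 0)
 *			perror("MS_SHARED");
 *		// bind /src onto /dst: row "shared", column "shared (++)"
 *		if (mount("/src", "/dst", NULL, MS_BIND, NULL) != 0)
 *			perror("MS_BIND");
 *		return 0;
 *	}
 */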
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	inode_lock(dentry->d_inode);
	if (unlikely(cant_mount(dentry))) {
		inode_unlock(dentry->d_inode);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	inode_unlock(path->dentry->d_inode);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}
static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, false);
}
/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
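
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the MS_REC | MS_PRIVATE combination handled by do_change_type() above,
 * as issued by container runtimes before building a new mount tree.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// recursively make everything under / private: no propagation
 *		if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL) != 0)
 *			perror("mount");
 *		return 0;
 *	}
 */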
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
static struct mount *__do_loopback(struct path *old_path, int recurse)
{
	struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);

	if (IS_MNT_UNBINDABLE(old))
		return mnt;

	if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
		return mnt;

	if (!recurse && has_locked_children(old, old_path->dentry))
		return mnt;

	if (recurse)
		mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path->dentry, 0);

	if (!IS_ERR(mnt))
		mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	return mnt;
}
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	if (IS_ERR(mp)) {
		err = PTR_ERR(mp);
		goto out;
	}

	parent = real_mount(path->mnt);
	if (!check_mnt(parent))
		goto out2;

	mnt = __do_loopback(&old_path, recurse);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
static struct file *open_detached_copy(struct path *path, bool recursive)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
	struct mount *mnt, *p;
	struct file *file;

	if (IS_ERR(ns))
		return ERR_CAST(ns);

	namespace_lock();
	mnt = __do_loopback(path, recursive);
	if (IS_ERR(mnt)) {
		namespace_unlock();
		free_mnt_ns(ns);
		return ERR_CAST(mnt);
	}

	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt_ns = ns;
		ns->mounts++;
	}
	ns->root = mnt;
	list_add_tail(&ns->list, &mnt->mnt_list);
	mntget(&mnt->mnt);
	unlock_mount_hash();
	namespace_unlock();

	mntput(path->mnt);
	path->mnt = &mnt->mnt;
	file = dentry_open(path, O_PATH, current_cred());
	if (IS_ERR(file))
		dissolve_on_fput(path->mnt);
	else
		file->f_mode |= FMODE_NEED_UNMOUNT;
	return file;
}
SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
{
	struct file *file;
	struct path path;
	int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
	bool detached = flags & OPEN_TREE_CLONE;
	int error;
	int fd;

	BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);

	if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
		      AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
		      OPEN_TREE_CLOEXEC))
		return -EINVAL;

	if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
		return -EINVAL;

	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	if (detached && !may_mount())
		return -EPERM;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (unlikely(error)) {
		file = ERR_PTR(error);
	} else {
		if (detached)
			file = open_detached_copy(&path, flags & AT_RECURSIVE);
		else
			file = dentry_open(&path, O_PATH, current_cred());
		path_put(&path);
	}
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	fd_install(fd, file);
	return fd;
}
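
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * grabbing a detached recursive copy of a subtree with open_tree(2).
 * Older libcs lack a wrapper, so this goes through syscall(2); fallback
 * constants are defined in case the headers predate them. "/mnt/data"
 * is a hypothetical path.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *
 *	#ifndef OPEN_TREE_CLONE
 *	#define OPEN_TREE_CLONE		1
 *	#endif
 *	#ifndef OPEN_TREE_CLOEXEC
 *	#define OPEN_TREE_CLOEXEC	O_CLOEXEC
 *	#endif
 *	#ifndef AT_RECURSIVE
 *	#define AT_RECURSIVE		0x8000
 *	#endif
 *
 *	int main(void)
 *	{
 *		int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/data",
 *				 OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *
 *		if (fd < 0) {
 *			perror("open_tree");
 *			return 1;
 *		}
 *		// the detached copy dies on final close unless move_mount(2)ed
 *		close(fd);
 *		return 0;
 *	}
 */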
/*
 * Don't allow locked mount flags to be cleared.
 *
 * No locks need to be held here while testing the various MNT_LOCK
 * flags because those flags can never be cleared once they are set.
 */
static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
{
	unsigned int fl = mnt->mnt.mnt_flags;

	if ((fl & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY))
		return false;

	if ((fl & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV))
		return false;

	if ((fl & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID))
		return false;

	if ((fl & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC))
		return false;

	if ((fl & MNT_LOCK_ATIME) &&
	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
		return false;

	return true;
}
static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
{
	bool readonly_request = (mnt_flags & MNT_READONLY);

	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
		return 0;

	if (readonly_request)
		return mnt_make_readonly(mnt);

	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	return 0;
}
static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
	mnt->mnt.mnt_flags = mnt_flags;
	touch_mnt_namespace(mnt->mnt_ns);
}

static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;

	if (!__mnt_is_readonly(mnt) &&
	   (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
		struct tm tm;

		time64_to_tm(sb->s_time_max, 0, &tm);

		pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
			sb->s_type->name,
			is_mounted(mnt) ? "remounted" : "mounted",
			mntpath,
			tm.tm_year+1900, (unsigned long long)sb->s_time_max);

		free_page((unsigned long)buf);
	}
}

/*
 * Handle reconfiguration of the mountpoint only without alteration of the
 * superblock it refers to.  This is triggered by specifying MS_REMOUNT|MS_BIND
 * to mount(2).
 */
static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
{
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != mnt->mnt.mnt_root)
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	/*
	 * We're only checking whether the superblock is read-only not
	 * changing it, so only take down_read(&sb->s_umount).
	 */
	down_read(&sb->s_umount);
	lock_mount_hash();
	ret = change_mount_ro_state(mnt, mnt_flags);
	if (ret == 0)
		set_mount_attributes(mnt, mnt_flags);
	unlock_mount_hash();
	up_read(&sb->s_umount);

	mnt_warn_timestamp_expiry(path, &mnt->mnt);

	return ret;
}

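/*
 * Userspace view (illustrative sketch, hypothetical path): the common
 * "make this bind mount read-only" idiom lands in do_reconfigure_mnt(),
 * changing only per-mountpoint flags, never the shared superblock:
 *
 *	mount(NULL, "/mnt/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */
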
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int ms_flags, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	struct fs_context *fc;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	fc->oldapi = true;
	err = parse_monolithic_mount_data(fc, data);
	if (!err) {
		down_write(&sb->s_umount);
		err = -EPERM;
		if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
			err = reconfigure_super(fc);
			if (!err) {
				lock_mount_hash();
				set_mount_attributes(mnt, mnt_flags);
				unlock_mount_hash();
			}
		}
		up_write(&sb->s_umount);
	}

	mnt_warn_timestamp_expiry(path, &mnt->mnt);

	put_fs_context(fc);
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/*
 * Check that there aren't references to earlier/same mount namespaces in the
 * specified subtree.  Such references can act as pins for mount namespaces
 * that aren't checked by the mount-cycle checking code, thereby allowing
 * cycles to be made.
 */
static bool check_for_nsfs_mounts(struct mount *subtree)
{
	struct mount *p;
	bool ret = false;

	lock_mount_hash();
	for (p = subtree; p; p = next_mnt(p, subtree))
		if (mnt_ns_loop(p->mnt.mnt_root))
			goto out;

	ret = true;
out:
	unlock_mount_hash();
	return ret;
}

static int do_move_mount(struct path *old_path, struct path *new_path)
{
	struct mnt_namespace *ns;
	struct mount *p;
	struct mount *old;
	struct mount *parent;
	struct mountpoint *mp, *old_mp;
	int err;
	bool attached;

	mp = lock_mount(new_path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	old = real_mount(old_path->mnt);
	p = real_mount(new_path->mnt);
	parent = old->mnt_parent;
	attached = mnt_has_parent(old);
	old_mp = old->mnt_mp;
	ns = old->mnt_ns;

	err = -EINVAL;
	/* The mountpoint must be in our namespace. */
	if (!check_mnt(p))
		goto out;

	/* The thing moved must be mounted... */
	if (!is_mounted(&old->mnt))
		goto out;

	/* ... and either ours or the root of anon namespace */
	if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
		goto out;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	if (old_path->dentry != old_path->mnt->mnt_root)
		goto out;

	if (d_is_dir(new_path->dentry) !=
	    d_is_dir(old_path->dentry))
		goto out;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (attached && IS_MNT_SHARED(parent))
		goto out;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out;
	err = -ELOOP;
	if (!check_for_nsfs_mounts(old))
		goto out;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out;

	err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
				   attached);
	if (err)
		goto out;

	/* if the mount is moved, it should no longer expire automatically */
	list_del_init(&old->mnt_expire);
	if (attached)
		put_mountpoint(old_mp);
out:
	unlock_mount(mp);
	if (!err) {
		if (attached)
			mntput_no_expire(parent);
		else
			free_mnt_ns(ns);
	}
	return err;
}

static int do_move_mount_old(struct path *path, const char *old_name)
{
	struct path old_path;
	int err;

	if (!old_name || !*old_name)
		return -EINVAL;

	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	err = do_move_mount(&old_path, path);
	path_put(&old_path);
	return err;
}

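/*
 * Userspace view (illustrative sketch, hypothetical paths): the legacy
 * way to relocate an existing mount, routed through do_move_mount_old():
 *
 *	mount("/mnt/old", "/mnt/new", NULL, MS_MOVE, NULL);
 */
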
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
			struct path *path, int mnt_flags)
{
	struct mount *parent = real_mount(path->mnt);

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			return -EINVAL;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			return -EINVAL;
	}

	/* Refuse the same filesystem on the same mount point */
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		return -EBUSY;

	if (d_is_symlink(newmnt->mnt.mnt_root))
		return -EINVAL;

	newmnt->mnt.mnt_flags = mnt_flags;
	return graft_tree(newmnt, parent, mp);
}

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);

/*
 * Create a new mount using a superblock configuration and request it
 * be added to the namespace tree.
 */
static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
			   unsigned int mnt_flags)
{
	struct vfsmount *mnt;
	struct mountpoint *mp;
	struct super_block *sb = fc->root->d_sb;
	int error;

	error = security_sb_kern_mount(sb);
	if (!error && mount_too_revealing(sb, &mnt_flags))
		error = -EPERM;

	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	up_write(&sb->s_umount);

	mnt = vfs_create_mount(fc);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	mnt_warn_timestamp_expiry(mountpoint, mnt);

	mp = lock_mount(mountpoint);
	if (IS_ERR(mp)) {
		mntput(mnt);
		return PTR_ERR(mp);
	}
	error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
	unlock_mount(mp);
	if (error < 0)
		mntput(mnt);
	return error;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct fs_context *fc;
	const char *subtype = NULL;
	int err = 0;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (type->fs_flags & FS_HAS_SUBTYPE) {
		subtype = strchr(fstype, '.');
		if (subtype) {
			subtype++;
			if (!*subtype) {
				put_filesystem(type);
				return -EINVAL;
			}
		}
	}

	fc = fs_context_for_mount(type, sb_flags);
	put_filesystem(type);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	if (subtype)
		err = vfs_parse_fs_string(fc, "subtype",
					  subtype, strlen(subtype));
	if (!err && name)
		err = vfs_parse_fs_string(fc, "source", name, strlen(name));
	if (!err)
		err = parse_monolithic_mount_data(fc, data);
	if (!err && !mount_capable(fc))
		err = -EPERM;
	if (!err)
		err = vfs_get_tree(fc);
	if (!err)
		err = do_new_mount_fc(fc, path, mnt_flags);

	put_fs_context(fc);
	return err;
}

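/*
 * Userspace view (illustrative sketch): an ordinary new mount via mount(2)
 * takes this path; the fstype argument selects the filesystem driver and
 * the data argument carries fs-specific option text:
 *
 *	mount("proc", "/proc", "proc", MS_NOSUID | MS_NODEV | MS_NOEXEC, NULL);
 */
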
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct dentry *dentry = path->dentry;
	struct mountpoint *mp;
	struct mount *mnt;
	int err;

	if (!m)
		return 0;
	if (IS_ERR(m))
		return PTR_ERR(m);

	mnt = real_mount(m);
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == dentry) {
		err = -ELOOP;
		goto discard;
	}

	/*
	 * we don't want to use lock_mount() - in this case finding something
	 * that overmounts our mountpoint means "quietly drop what we've
	 * got", not "try to mount it on top".
	 */
	inode_lock(dentry->d_inode);
	namespace_lock();
	if (unlikely(cant_mount(dentry))) {
		err = -ENOENT;
		goto discard_locked;
	}
	rcu_read_lock();
	if (unlikely(__lookup_mnt(path->mnt, dentry))) {
		rcu_read_unlock();
		err = 0;
		goto discard_locked;
	}
	rcu_read_unlock();
	mp = get_mountpoint(dentry);
	if (IS_ERR(mp)) {
		err = PTR_ERR(mp);
		goto discard_locked;
	}

	err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	unlock_mount(mp);
	if (unlikely(err))
		goto discard;
	mntput(m);
	return 0;

discard_locked:
	namespace_unlock();
	inode_unlock(dentry->d_inode);
discard:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}

static void *copy_mount_options(const void __user *data)
{
	char *copy;
	unsigned left, offset;

	if (!data)
		return NULL;

	copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	left = copy_from_user(copy, data, PAGE_SIZE);

	/*
	 * Not all architectures have an exact copy_from_user(). Resort to
	 * byte at a time.
	 */
	offset = PAGE_SIZE - left;
	while (left) {
		char c;
		if (get_user(c, (const char __user *)data + offset))
			break;
		copy[offset] = c;
		left--;
		offset++;
	}

	if (left == PAGE_SIZE) {
		kfree(copy);
		return ERR_PTR(-EFAULT);
	}

	return copy;
}

static char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PATH_MAX) : NULL;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
int path_mount(const char *dev_name, struct path *path,
		const char *type_page, unsigned long flags, void *data_page)
{
	unsigned int mnt_flags = 0, sb_flags;
	int ret;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	if (flags & MS_NOUSER)
		return -EINVAL;

	ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
	if (ret)
		return ret;
	if (!may_mount())
		return -EPERM;
	if ((flags & SB_MANDLOCK) && !may_mandlock())
		return -EPERM;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;
	if (flags & MS_NOSYMFOLLOW)
		mnt_flags |= MNT_NOSYMFOLLOW;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
	}

	sb_flags = flags & (SB_RDONLY |
			    SB_SYNCHRONOUS |
			    SB_MANDLOCK |
			    SB_DIRSYNC |
			    SB_SILENT |
			    SB_POSIXACL |
			    SB_LAZYTIME |
			    SB_I_VERSION);

	if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
		return do_reconfigure_mnt(path, mnt_flags);
	if (flags & MS_REMOUNT)
		return do_remount(path, flags, sb_flags, mnt_flags, data_page);
	if (flags & MS_BIND)
		return do_loopback(path, dev_name, flags & MS_REC);
	if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return do_change_type(path, flags);
	if (flags & MS_MOVE)
		return do_move_mount_old(path, dev_name);

	return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
			    data_page);
}

long do_mount(const char *dev_name, const char __user *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int ret;

	ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;
	ret = path_mount(dev_name, &path, type_page, flags, data_page);
	path_put(&path);
	return ret;
}

static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
}

static void dec_mnt_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
}

static void free_mnt_ns(struct mnt_namespace *ns)
{
	if (!is_anon_ns(ns))
		ns_free_inum(&ns->ns);
	dec_mnt_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	kfree(ns);
}

/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops.  A 64bit
 * number incrementing at 10GHz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);

static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
{
	struct mnt_namespace *new_ns;
	struct ucounts *ucounts;
	int ret;

	ucounts = inc_mnt_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns) {
		dec_mnt_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}
	if (!anon) {
		ret = ns_alloc_inum(&new_ns->ns);
		if (ret) {
			kfree(new_ns);
			dec_mnt_namespaces(ucounts);
			return ERR_PTR(ret);
		}
	}
	new_ns->ns.ops = &mntns_operations;
	if (!anon)
		new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	refcount_set(&new_ns->ns.count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	spin_lock_init(&new_ns->ns_lock);
	new_ns->user_ns = get_user_ns(user_ns);
	new_ns->ucounts = ucounts;
	return new_ns;
}

__latent_entropy
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns, false);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	if (user_ns != ns->user_ns) {
		lock_mount_hash();
		lock_mnt_tree(new);
		unlock_mount_hash();
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		new_ns->mounts++;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}

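/*
 * Userspace view (illustrative sketch): copy_mnt_ns() runs when a task
 * unshares its mount namespace; making the copied tree private afterwards
 * stops mount events propagating back to the parent namespace:
 *
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *
 *	unshare(CLONE_NEWNS);
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 */
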
struct dentry *mount_subtree(struct vfsmount *m, const char *name)
{
	struct mount *mnt = real_mount(m);
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = alloc_mnt_ns(&init_user_ns, true);
	if (IS_ERR(ns)) {
		mntput(m);
		return ERR_CAST(ns);
	}
	mnt->mnt_ns = ns;
	ns->root = mnt;
	ns->mounts++;
	list_add(&mnt->mnt_list, &ns->list);

	err = vfs_path_lookup(m->mnt_root, m,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	void *options;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	options = copy_mount_options(data);
	ret = PTR_ERR(options);
	if (IS_ERR(options))
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);

	kfree(options);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}

#define FSMOUNT_VALID_FLAGS \
	(MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
	 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME)

#define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)

#define MOUNT_SETATTR_PROPAGATION_FLAGS \
	(MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)

static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
{
	unsigned int mnt_flags = 0;

	if (attr_flags & MOUNT_ATTR_RDONLY)
		mnt_flags |= MNT_READONLY;
	if (attr_flags & MOUNT_ATTR_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (attr_flags & MOUNT_ATTR_NODEV)
		mnt_flags |= MNT_NODEV;
	if (attr_flags & MOUNT_ATTR_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (attr_flags & MOUNT_ATTR_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;

	return mnt_flags;
}

/*
 * Create a kernel mount representation for a new, prepared superblock
 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
 */
SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
		unsigned int, attr_flags)
{
	struct mnt_namespace *ns;
	struct fs_context *fc;
	struct file *file;
	struct path newmount;
	struct mount *mnt;
	struct fd f;
	unsigned int mnt_flags = 0;
	long ret;

	if (!may_mount())
		return -EPERM;

	if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
		return -EINVAL;

	if (attr_flags & ~FSMOUNT_VALID_FLAGS)
		return -EINVAL;

	mnt_flags = attr_flags_to_mnt_flags(attr_flags);

	switch (attr_flags & MOUNT_ATTR__ATIME) {
	case MOUNT_ATTR_STRICTATIME:
		break;
	case MOUNT_ATTR_NOATIME:
		mnt_flags |= MNT_NOATIME;
		break;
	case MOUNT_ATTR_RELATIME:
		mnt_flags |= MNT_RELATIME;
		break;
	default:
		return -EINVAL;
	}

	f = fdget(fs_fd);
	if (!f.file)
		return -EBADF;

	ret = -EINVAL;
	if (f.file->f_op != &fscontext_fops)
		goto err_fsfd;

	fc = f.file->private_data;

	ret = mutex_lock_interruptible(&fc->uapi_mutex);
	if (ret < 0)
		goto err_fsfd;

	/* There must be a valid superblock or we can't mount it */
	ret = -EINVAL;
	if (!fc->root)
		goto err_unlock;

	ret = -EPERM;
	if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
		pr_warn("VFS: Mount too revealing\n");
		goto err_unlock;
	}

	ret = -EBUSY;
	if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
		goto err_unlock;

	ret = -EPERM;
	if ((fc->sb_flags & SB_MANDLOCK) && !may_mandlock())
		goto err_unlock;

	newmount.mnt = vfs_create_mount(fc);
	if (IS_ERR(newmount.mnt)) {
		ret = PTR_ERR(newmount.mnt);
		goto err_unlock;
	}
	newmount.dentry = dget(fc->root);
	newmount.mnt->mnt_flags = mnt_flags;

	/* We've done the mount bit - now move the file context into more or
	 * less the same state as if we'd done an fspick().  We don't want to
	 * do any memory allocation or anything like that at this point as we
	 * don't want to have to handle any errors incurred.
	 */
	vfs_clean_context(fc);

	ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
	if (IS_ERR(ns)) {
		ret = PTR_ERR(ns);
		goto err_path;
	}
	mnt = real_mount(newmount.mnt);
	mnt->mnt_ns = ns;
	ns->root = mnt;
	ns->mounts = 1;
	list_add(&mnt->mnt_list, &ns->list);
	mntget(newmount.mnt);

	/* Attach to an apparent O_PATH fd with a note that we need to unmount
	 * it, not just simply put it.
	 */
	file = dentry_open(&newmount, O_PATH, fc->cred);
	if (IS_ERR(file)) {
		dissolve_on_fput(newmount.mnt);
		ret = PTR_ERR(file);
		goto err_path;
	}
	file->f_mode |= FMODE_NEED_UNMOUNT;

	ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
	if (ret >= 0)
		fd_install(ret, file);
	else
		fput(file);

err_path:
	path_put(&newmount);
err_unlock:
	mutex_unlock(&fc->uapi_mutex);
err_fsfd:
	fdput(f);
	return ret;
}

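/*
 * Userspace view (illustrative sketch; raw syscalls, hypothetical source
 * device): the new mount API splits mount(2) into configure + create +
 * attach steps, with fsmount() producing the detached mount fd:
 *
 *	int fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC,
 *			  MOUNT_ATTR_RDONLY);
 */
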
/*
 * Move a mount from one place to another.  In combination with
 * fsopen()/fsmount() this is used to install a new mount and in combination
 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
 * a mount subtree.
 *
 * Note the flags value is a combination of MOVE_MOUNT_* flags.
 */
SYSCALL_DEFINE5(move_mount,
		int, from_dfd, const char __user *, from_pathname,
		int, to_dfd, const char __user *, to_pathname,
		unsigned int, flags)
{
	struct path from_path, to_path;
	unsigned int lflags;
	int ret = 0;

	if (!may_mount())
		return -EPERM;

	if (flags & ~MOVE_MOUNT__MASK)
		return -EINVAL;

	/* If someone gives a pathname, they aren't permitted to move
	 * from an fd that requires unmount as we can't get at the flag
	 * to clear it afterwards.
	 */
	lflags = 0;
	if (flags & MOVE_MOUNT_F_SYMLINKS)	lflags |= LOOKUP_FOLLOW;
	if (flags & MOVE_MOUNT_F_AUTOMOUNTS)	lflags |= LOOKUP_AUTOMOUNT;
	if (flags & MOVE_MOUNT_F_EMPTY_PATH)	lflags |= LOOKUP_EMPTY;

	ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
	if (ret < 0)
		return ret;

	lflags = 0;
	if (flags & MOVE_MOUNT_T_SYMLINKS)	lflags |= LOOKUP_FOLLOW;
	if (flags & MOVE_MOUNT_T_AUTOMOUNTS)	lflags |= LOOKUP_AUTOMOUNT;
	if (flags & MOVE_MOUNT_T_EMPTY_PATH)	lflags |= LOOKUP_EMPTY;

	ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
	if (ret < 0)
		goto out_from;

	ret = security_move_mount(&from_path, &to_path);
	if (ret < 0)
		goto out_to;

	ret = do_move_mount(&from_path, &to_path);

out_to:
	path_put(&to_path);
out_from:
	path_put(&from_path);
	return ret;
}

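/*
 * Userspace view (illustrative sketch, continuing the fsmount() example
 * above): MOVE_MOUNT_F_EMPTY_PATH attaches the detached mount fd itself
 * rather than a path resolved relative to it:
 *
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */
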
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, root;
	struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_at(AT_FDCWD, new_root,
			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
	if (error)
		goto out0;

	error = user_path_at(AT_FDCWD, put_old,
			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	ex_parent = new_mnt->mnt_parent;
	root_parent = root_mnt->mnt_parent;
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(ex_parent) ||
		IS_MNT_SHARED(root_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	lock_mount_hash();
	umount_mnt(new_mnt);
	root_mp = unhash_mnt(root_mnt);  /* we'll need its mountpoint */
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, root_parent, root_mp);
	mnt_add_count(root_parent, -1);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	put_mountpoint(root_mp);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error)
		mntput_no_expire(ex_parent);
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}

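/*
 * Userspace view (illustrative sketch, hypothetical paths; raw syscall(2)
 * since no libc wrapper is assumed): the usual container-setup sequence
 * around pivot_root(2) -- new_root must already be a mount point and the
 * put_old directory must already exist beneath it:
 *
 *	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *	syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);
 */
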
static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
{
	unsigned int flags = mnt->mnt.mnt_flags;

	/* flags to clear */
	flags &= ~kattr->attr_clr;
	/* flags to raise */
	flags |= kattr->attr_set;

	return flags;
}

static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
	struct vfsmount *m = &mnt->mnt;

	if (!kattr->mnt_userns)
		return 0;

	/*
	 * Once a mount has been idmapped we don't allow it to change its
	 * mapping. It makes things simpler and callers can just create
	 * another bind-mount they can idmap if they want to.
	 */
	if (mnt_user_ns(m) != &init_user_ns)
		return -EPERM;

	/* The underlying filesystem doesn't support idmapped mounts yet. */
	if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
		return -EINVAL;

	/* We're not controlling the superblock. */
	if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* Mount has already been visible in the filesystem hierarchy. */
	if (!is_anon_ns(mnt->mnt_ns))
		return -EINVAL;

	return 0;
}

static struct mount *mount_setattr_prepare(struct mount_kattr *kattr,
					   struct mount *mnt, int *err)
{
	struct mount *m = mnt, *last = NULL;

	if (!is_mounted(&m->mnt)) {
		*err = -EINVAL;
		goto out;
	}

	if (!(mnt_has_parent(m) ? check_mnt(m) : is_anon_ns(m->mnt_ns))) {
		*err = -EINVAL;
		goto out;
	}

	do {
		unsigned int flags;

		flags = recalc_flags(kattr, m);
		if (!can_change_locked_flags(m, flags)) {
			*err = -EPERM;
			goto out;
		}

		*err = can_idmap_mount(kattr, m);
		if (*err)
			goto out;

		last = m;

		if ((kattr->attr_set & MNT_READONLY) &&
		    !(m->mnt.mnt_flags & MNT_READONLY)) {
			*err = mnt_hold_writers(m);
			if (*err)
				goto out;
		}
	} while (kattr->recurse && (m = next_mnt(m, mnt)));

out:
	return last;
}

static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
	struct user_namespace *mnt_userns;

	if (!kattr->mnt_userns)
		return;

	mnt_userns = get_user_ns(kattr->mnt_userns);
	/* Pairs with smp_load_acquire() in mnt_user_ns(). */
	smp_store_release(&mnt->mnt.mnt_userns, mnt_userns);
}

static void mount_setattr_commit(struct mount_kattr *kattr,
				 struct mount *mnt, struct mount *last,
				 int err)
{
	struct mount *m = mnt;

	do {
		if (!err) {
			unsigned int flags;

			do_idmap_mount(kattr, m);
			flags = recalc_flags(kattr, m);
			WRITE_ONCE(m->mnt.mnt_flags, flags);
		}

		/*
		 * We either set MNT_READONLY above so make it visible
		 * before ~MNT_WRITE_HOLD or we failed to recursively
		 * apply mount options.
		 */
		if ((kattr->attr_set & MNT_READONLY) &&
		    (m->mnt.mnt_flags & MNT_WRITE_HOLD))
			mnt_unhold_writers(m);

		if (!err && kattr->propagation)
			change_mnt_propagation(m, kattr->propagation);

		/*
		 * On failure, only cleanup until we found the first mount
		 * we failed to handle.
		 */
		if (err && m == last)
			break;
	} while (kattr->recurse && (m = next_mnt(m, mnt)));

	if (!err)
		touch_mnt_namespace(mnt->mnt_ns);
}

static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
{
	struct mount *mnt = real_mount(path->mnt), *last = NULL;
	int err = 0;

	if (path->dentry != mnt->mnt.mnt_root)
		return -EINVAL;

	if (kattr->propagation) {
		/*
		 * Only take namespace_lock() if we're actually changing
		 * propagation.
		 */
		namespace_lock();
		if (kattr->propagation == MS_SHARED) {
			err = invent_group_ids(mnt, kattr->recurse);
			if (err) {
				namespace_unlock();
				return err;
			}
		}
	}

	lock_mount_hash();

	/*
	 * Get the mount tree in a shape where we can change mount
	 * properties without failure.
	 */
	last = mount_setattr_prepare(kattr, mnt, &err);
	if (last) /* Commit all changes or revert to the old state. */
		mount_setattr_commit(kattr, mnt, last, err);

	unlock_mount_hash();

	if (kattr->propagation) {
		namespace_unlock();
		if (err)
			cleanup_group_ids(mnt, NULL);
	}

	return err;
}

static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
				struct mount_kattr *kattr, unsigned int flags)
{
	int err = 0;
	struct ns_common *ns;
	struct user_namespace *mnt_userns;
	struct file *file;

	if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
		return 0;

	/*
	 * We currently do not support clearing an idmapped mount. If this ever
	 * is a use-case we can revisit this but for now let's keep it simple
	 * and not allow it.
	 */
	if (attr->attr_clr & MOUNT_ATTR_IDMAP)
		return -EINVAL;

	if (attr->userns_fd > INT_MAX)
		return -EINVAL;

	file = fget(attr->userns_fd);
	if (!file)
		return -EBADF;

	if (!proc_ns_file(file)) {
		err = -EINVAL;
		goto out_fput;
	}

	ns = get_proc_ns(file_inode(file));
	if (ns->ops->type != CLONE_NEWUSER) {
		err = -EINVAL;
		goto out_fput;
	}

	/*
	 * The init_user_ns is used to indicate that a vfsmount is not idmapped.
	 * This is simpler than just having to treat NULL as unmapped. Users
	 * wanting to idmap a mount to init_user_ns can just use a namespace
	 * with an identity mapping.
	 */
	mnt_userns = container_of(ns, struct user_namespace, ns);
	if (mnt_userns == &init_user_ns) {
		err = -EPERM;
		goto out_fput;
	}
	kattr->mnt_userns = get_user_ns(mnt_userns);

out_fput:
	fput(file);
	return err;
}

static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
			     struct mount_kattr *kattr, unsigned int flags)
{
	unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;

	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	*kattr = (struct mount_kattr) {
		.lookup_flags	= lookup_flags,
		.recurse	= !!(flags & AT_RECURSIVE),
	};

	if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
		return -EINVAL;
	if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
		return -EINVAL;
	kattr->propagation = attr->propagation;

	if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
		return -EINVAL;

	kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
	kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);

	/*
	 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
	 * users wanting to transition to a different atime setting cannot
	 * simply specify the atime setting in @attr_set, but must also
	 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
	 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
	 * @attr_clr and that @attr_set can't have any atime bits set if
	 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
	 */
	if (attr->attr_clr & MOUNT_ATTR__ATIME) {
		if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
			return -EINVAL;

		/*
		 * Clear all previous time settings as they are mutually
		 * exclusive.
		 */
		kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
		switch (attr->attr_set & MOUNT_ATTR__ATIME) {
		case MOUNT_ATTR_RELATIME:
			kattr->attr_set |= MNT_RELATIME;
			break;
		case MOUNT_ATTR_NOATIME:
			kattr->attr_set |= MNT_NOATIME;
			break;
		case MOUNT_ATTR_STRICTATIME:
			break;
		default:
			return -EINVAL;
		}
	} else {
		if (attr->attr_set & MOUNT_ATTR__ATIME)
			return -EINVAL;
	}

	return build_mount_idmapped(attr, usize, kattr, flags);
}

static void finish_mount_kattr(struct mount_kattr *kattr)
{
	put_user_ns(kattr->mnt_userns);
	kattr->mnt_userns = NULL;
}

SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
		unsigned int, flags, struct mount_attr __user *, uattr,
		size_t, usize)
{
	int err;
	struct path target;
	struct mount_attr attr;
	struct mount_kattr kattr;

	BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);

	if (flags & ~(AT_EMPTY_PATH |
		      AT_RECURSIVE |
		      AT_SYMLINK_NOFOLLOW |
		      AT_NO_AUTOMOUNT))
		return -EINVAL;

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
	if (err)
		return err;

	/* Don't bother walking through the mounts if this is a nop. */
	if (attr.attr_set == 0 &&
	    attr.attr_clr == 0 &&
	    attr.propagation == 0)
		return 0;

	err = build_mount_kattr(&attr, usize, &kattr, flags);
	if (err)
		return err;

	err = user_path_at(dfd, path, kattr.lookup_flags, &target);
	if (err)
		return err;

	err = do_mount_setattr(&target, &kattr);
	finish_mount_kattr(&kattr);
	path_put(&target);
	return err;
}

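/*
 * Userspace view (illustrative sketch, hypothetical path): recursively
 * making a subtree read-only and nosuid with mount_setattr(2):
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
 *	};
 *	syscall(SYS_mount_setattr, AT_FDCWD, "/mnt", AT_RECURSIVE,
 *		&attr, sizeof(attr));
 */
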
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	m->mnt_ns = ns;
	ns->root = m;
	ns->mounts = 1;
	list_add(&m->mnt_list, &ns->list);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!refcount_dec_and_test(&ns->ns.count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before the filesystem is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (mnt[i])
			real_mount(mnt[i])->mnt_ns = NULL;
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt_is_cursor(mnt))
			continue;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	unlock_ns_list(ns);
	up_read(&namespace_sem);
	return visible;
}

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid.  This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};
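
/*
 * Userspace view (illustrative sketch, hypothetical pid): these operations
 * back /proc/<pid>/ns/mnt; mntns_install() is what runs on setns(2):
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY | O_CLOEXEC);
 *	setns(fd, CLONE_NEWNS);
 */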