/*
 * (C) Copyright Al Viro 2000, 2001
 * Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);
static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}
static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
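/*
 * Illustrative note (not part of the original file): both hash functions
 * derive a bucket index from pointer values. Dividing by L1_CACHE_BYTES
 * first discards the low bits, which carry no entropy because allocator
 * pointers are cache-line aligned. A hypothetical lookup would combine a
 * parent mount and mountpoint dentry exactly as __lookup_mnt() does below:
 *
 *	struct hlist_head *bucket = m_hash(parent_vfsmount, mountpoint_dentry);
 */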
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}
static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
}
/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
}

static inline void mnt_dec_writers(struct mount *mnt)
{
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
}
static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
317 * Most r/o & frozen checks on a fs are for operations that take discrete
318 * amounts of time, like a write() or unlink(). We must keep track of when
319 * those operations start (for permission checks) and when they end, so that we
320 * can determine when writes are able to occur to a filesystem.
323 * __mnt_want_write - get write access to a mount without freeze protection
324 * @m: the mount on which to take a write
326 * This tells the low-level filesystem that a write is about to be performed to
327 * it, and makes sure that writes are allowed (mnt it read-write) before
328 * returning success. This operation does not protect against filesystem being
329 * frozen. When the write operation is finished, __mnt_drop_write() must be
330 * called. This is effectively a refcount.
332 int __mnt_want_write(struct vfsmount
*m
)
334 struct mount
*mnt
= real_mount(m
);
338 mnt_inc_writers(mnt
);
340 * The store to mnt_inc_writers must be visible before we pass
341 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
342 * incremented count after it has set MNT_WRITE_HOLD.
345 while (ACCESS_ONCE(mnt
->mnt
.mnt_flags
) & MNT_WRITE_HOLD
)
348 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
349 * be set to match its requirements. So we must not load that until
350 * MNT_WRITE_HOLD is cleared.
353 if (mnt_is_readonly(m
)) {
354 mnt_dec_writers(mnt
);
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}
static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt_devname);
	free_percpu(mnt->mnt_pcp);
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return false;
	if (bastard == NULL)
		return true;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return true;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return false;
	}
	rcu_read_unlock();
	mntput(bastard);
	rcu_read_lock();
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct mount *p, *res;
	res = p = __lookup_mnt(mnt, dentry);
	if (!p)
		goto out;
	hlist_for_each_entry_continue(p, mnt_hash) {
		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
			break;
		res = p;
	}
out:
	return res;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;
	int ret;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	hlist_add_head(&mp->m_hash, chain);
	return mp;
}
static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
}
/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
static void attach_shadowed(struct mount *mnt,
			struct mount *parent,
			struct mount *shadows)
{
	if (shadows) {
		hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
		list_add(&mnt->mnt_child, &shadows->mnt_child);
	} else {
		hlist_add_head_rcu(&mnt->mnt_hash,
				m_hash(&parent->mnt, mnt->mnt_mountpoint));
		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	}
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	attach_shadowed(mnt, parent, shadows);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);
	unlock_mount_hash();

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);
struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
struct vfsmount *mnt_clone_internal(struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);
/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}
static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}
static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = proc_mounts(m);
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}
EXPORT_SYMBOL(may_umount);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct mount *mnt;
	struct hlist_head head = unmounted;

	if (likely(hlist_empty(&head))) {
		up_write(&namespace_sem);
		return;
	}

	head.first->pprev = &head.first;
	INIT_HLIST_HEAD(&unmounted);

	up_write(&namespace_sem);

	synchronize_rcu();

	while (!hlist_empty(&head)) {
		mnt = hlist_entry(head.first, struct mount, mnt_hash);
		hlist_del_init(&mnt->mnt_hash);
		if (mnt->mnt_ex_mountpoint.mnt)
			path_put(&mnt->mnt_ex_mountpoint);
		mntput(&mnt->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 * how = 0 => just this tree, don't propagate
 * how = 1 => propagate; we know that nobody else has reference to any victims
 * how = 2 => lazy umount
 */
void umount_tree(struct mount *mnt, int how)
{
	HLIST_HEAD(tmp_list);
	struct mount *p;
	struct mount *last = NULL;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		hlist_del_init_rcu(&p->mnt_hash);
		hlist_add_head(&p->mnt_hash, &tmp_list);
	}

	hlist_for_each_entry(p, &tmp_list, mnt_hash)
		list_del_init(&p->mnt_child);

	if (how)
		propagate_umount(&tmp_list);

	hlist_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		if (how < 2)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
		if (mnt_has_parent(p)) {
			put_mountpoint(p->mnt_mp);
			/* move the reference to mountpoint into ->mnt_ex_mountpoint */
			p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
			p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
			p->mnt_mountpoint = p->mnt.mnt_root;
			p->mnt_parent = p;
			p->mnt_mp = NULL;
		}
		change_mnt_propagation(p, MS_PRIVATE);
		last = p;
	}
	if (last) {
		last->mnt_hash.next = unmounted.first;
		unmounted.first = tmp_list.first;
		unmounted.first->pprev = &unmounted.first;
	}
}
static void shrink_submounts(struct mount *mnt);
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(8). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 2);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, 1);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	struct inode *inode = dentry->d_inode;
	struct proc_ns *ei;

	if (!proc_ns_inode(inode))
		return false;

	ei = get_proc_ns(inode);
	if (ei->ns_ops != &mntns_operations)
		return false;

	return true;
}
static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = get_proc_ns(dentry->d_inode)->ns;
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt.mnt_flags &= ~MNT_LOCKED;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			struct mount *t = NULL;
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			mnt_set_mountpoint(parent, p->mnt_mp, q);
			if (!list_empty(&parent->mnt_mounts)) {
				t = list_last_entry(&parent->mnt_mounts,
					struct mount, mnt_child);
				if (t->mnt_mp != p->mnt_mp)
					t = NULL;
			}
			attach_shadowed(q, parent, t);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, 0);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	tree = copy_tree(real_mount(path->mnt), path->dentry,
			 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}
static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared      | private     | slave      | unbindable |
 * |**************************************************************************
 * |  shared  | shared (++) | shared (+)  | shared(+++)| invalid    |
 * |non-shared| shared (+)  | private     | slave (*)  | invalid    |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared      | private     | slave       | unbindable |
 * |**************************************************************************
 * |  shared  | shared (+)  | shared (+)  | shared(+++) | invalid    |
 * |non-shared| shared (+*) | private     | slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
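/*
 * Illustrative example (not part of the original file): the BIND table
 * row "shared destination, shared source -> shared (++)" corresponds to
 * this shell sequence (paths are hypothetical):
 *
 *	mount --make-shared /mnt/a          # destination is shared
 *	mount --bind /mnt/a/src /mnt/a/dst  # bind into the shared mount
 *
 * The bind is then replicated onto every mount in /mnt/a's propagation
 * tree, and the clone joins the source's peer group.
 */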
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt, NULL);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt_last(&child->mnt_parent->mnt,
				      child->mnt_mountpoint);
		commit_tree(child, q);
	}
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		umount_tree(child, 0);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	return err;
}
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = new_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}
static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;
	put_mountpoint(where);
	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}
/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent) || !check_mnt(old))
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, 0);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}
/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1);
	}
	unlock_mount_hash();
	namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
* data
, unsigned long *where
)
2372 if (!(page
= __get_free_page(GFP_KERNEL
)))
2375 /* We only care that *some* data at the address the user
2376 * gave us is valid. Just in case, we'll zero
2377 * the remainder of the page.
2379 /* copy_from_user cannot cross TASK_SIZE ! */
2380 size
= TASK_SIZE
- (unsigned long)data
;
2381 if (size
> PAGE_SIZE
)
2384 i
= size
- exact_copy_from_user((void *)page
, data
, size
);
2390 memset((char *)page
+ i
, 0, PAGE_SIZE
- i
);
int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
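/*
 * Illustrative userspace sketch (not part of the original file): the
 * dispatch above means a single mount(2) entry point serves five distinct
 * operations, selected purely by flags. Paths here are hypothetical.
 *
 *	mount("/dev/sda1", "/mnt", "ext4", MS_RDONLY, NULL);      // do_new_mount()
 *	mount("/mnt", "/mnt2", NULL, MS_BIND, NULL);              // do_loopback()
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);               // do_change_type()
 *	mount("/mnt2", "/mnt3", NULL, MS_MOVE, NULL);             // do_move_mount()
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);  // do_remount()
 */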
static void free_mnt_ns(struct mnt_namespace *ns)
{
	proc_free_inum(ns->proc_inum);
	put_user_ns(ns->user_ns);
	kfree(ns);
}
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops. A 64bit
 * number incrementing at 10Ghz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	int ret;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	ret = proc_alloc_inum(&new_ns->proc_inum);
	if (ret) {
		kfree(new_ns);
		return ERR_PTR(ret);
	}
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	return new_ns;
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
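/*
 * Illustrative userspace sketch (not part of this file): clone(2) or
 * unshare(2) with CLONE_NEWNS is what reaches copy_mnt_ns() above. A
 * common follow-up is to mark the whole tree private so that mounts made
 * here stop propagating back to the original namespace. Requires
 * CAP_SYS_ADMIN in the owning user namespace.
 *
 *	#define _GNU_SOURCE
 *	#include <err.h>
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *
 *	if (unshare(CLONE_NEWNS))
 *		err(1, "unshare");
 *	if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL))
 *		err(1, "make / rprivate");
 */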
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
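/*
 * Typical caller: a filesystem that assembles a mount internally and then
 * needs the dentry of a subdirectory inside it; NFS, for example, uses
 * this when the requested path points below the root of an export.
 */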
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	struct filename *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}
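/*
 * Illustrative userspace sketch (not part of this file): the same entry
 * point seen from the caller's side. The data argument lands in data_page
 * above and is capped at one page. The mount point path is hypothetical.
 *
 *	#include <err.h>
 *	#include <sys/mount.h>
 *
 *	if (mount("tmpfs", "/mnt/scratch", "tmpfs",
 *		  MS_NOSUID | MS_NODEV, "size=64m,mode=1777"))
 *		err(1, "mount tmpfs");
 */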
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}
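/*
 * Worked example: if root names the mount of /a and (mnt, dentry) names
 * /a/b/c on a mount stacked somewhere below it, the loop above replaces
 * (mnt, dentry) with the mountpoint in the parent mount until mnt is the
 * root mount (or runs out of parents); is_subdir() then answers whether
 * the final dentry sits inside root->dentry.
 */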
int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
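/*
 * Illustrative userspace sketch (not part of this file): the classic
 * switch-root sequence that drives the syscall below. Paths are
 * hypothetical; /newroot must already be a mountpoint (the bind mount
 * onto itself takes care of that) and /newroot/oldroot must exist. glibc
 * has no pivot_root wrapper, hence syscall(2).
 *
 *	#include <err.h>
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	if (mount("/newroot", "/newroot", NULL, MS_BIND, NULL))
 *		err(1, "bind");
 *	if (chdir("/newroot"))
 *		err(1, "chdir");
 *	if (syscall(SYS_pivot_root, ".", "oldroot"))
 *		err(1, "pivot_root");
 *	if (chdir("/"))
 *		err(1, "chdir /");
 *	if (umount2("/oldroot", MNT_DETACH))
 *		err(1, "umount old root");
 */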
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	unsigned u;
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				0,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				0,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	for (u = 0; u <= m_hash_mask; u++)
		INIT_HLIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u <= mp_hash_mask; u++)
		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
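/*
 * Both tables are sized from available memory by alloc_large_system_hash(),
 * but can be pinned on the kernel command line via the mhash_entries= and
 * mphash_entries= boot parameters handled near the top of this file.
 */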
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * This is a long-term mount; don't release mnt until
		 * it is unmounted just before the file system is
		 * unregistered.
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
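/*
 * Minimal in-kernel usage sketch (hypothetical "examplefs", assuming the
 * common pattern rather than documenting any specific caller): pin an
 * internal superblock at init time and drop it on exit. kern_mount(type)
 * is shorthand for kern_mount_data(type, NULL).
 *
 *	static struct vfsmount *examplefs_mnt;	// hypothetical
 *
 *	static int __init examplefs_init(void)
 *	{
 *		int err = register_filesystem(&examplefs_type);
 *		if (err)
 *			return err;
 *		examplefs_mnt = kern_mount(&examplefs_type);
 *		if (IS_ERR(examplefs_mnt)) {
 *			unregister_filesystem(&examplefs_type);
 *			return PTR_ERR(examplefs_mnt);
 *		}
 *		return 0;
 *	}
 *
 *	static void __exit examplefs_exit(void)
 *	{
 *		kern_unmount(examplefs_mnt);
 *		unregister_filesystem(&examplefs_type);
 *	}
 */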
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
bool fs_fully_visible(struct file_system_type *type)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool visible = false;

	if (unlikely(!ns))
		return false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		if (mnt->mnt.mnt_sb->s_type != type)
			continue;

		/* This mount is not fully visible if there are any child mounts
		 * that cover anything except for empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			if (!S_ISDIR(inode->i_mode))
				goto next;
			/* A directory starts out with i_nlink == 2 ("." plus
			 * the parent's entry); anything higher means it has
			 * subdirectories, i.e. it is not empty.
			 */
			if (inode->i_nlink > 2)
				goto next;
		}
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}
static void *mntns_get(struct task_struct *task)
{
	struct mnt_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = nsproxy->mnt_ns;
		get_mnt_ns(ns);
	}
	task_unlock(task);

	return ns;
}
static void mntns_put(void *ns)
{
	put_mnt_ns(ns);
}
static int mntns_install(struct nsproxy *nsproxy, void *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = ns;
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt    = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while (d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
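/*
 * Illustrative userspace sketch (not part of this file): setns(2) on a
 * /proc/<pid>/ns/mnt file descriptor is what lands in mntns_install()
 * above. The PID is hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <sched.h>
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *	if (fd < 0)
 *		err(1, "open");
 *	if (setns(fd, CLONE_NEWNS))
 *		err(1, "setns");
 */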
static unsigned int mntns_inum(void *ns)
{
	struct mnt_namespace *mnt_ns = ns;
	return mnt_ns->proc_inum;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.inum		= mntns_inum,
};