/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;
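
/*
 * This limit is exposed to userspace as the fs.mount-max sysctl, so it can
 * be tuned at run time; e.g. (illustrative, from a shell):
 *
 *	sysctl fs.mount-max=200000
 */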

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
        if (!str)
                return 0;
        mhash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
        if (!str)
                return 0;
        mphash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("mphash_entries=", set_mphash_entries);
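
/*
 * Both hash sizes can be overridden on the kernel command line via the
 * __setup() hooks above; e.g. (illustrative) booting with
 * "mhash_entries=262144 mphash_entries=8192".
 */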

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> m_hash_shift);
        return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
        unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> mp_hash_shift);
        return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
        int res;

retry:
        ida_pre_get(&mnt_id_ida, GFP_KERNEL);
        spin_lock(&mnt_id_lock);
        res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
        if (!res)
                mnt_id_start = mnt->mnt_id + 1;
        spin_unlock(&mnt_id_lock);
        if (res == -EAGAIN)
                goto retry;

        return res;
}

static void mnt_free_id(struct mount *mnt)
{
        int id = mnt->mnt_id;
        spin_lock(&mnt_id_lock);
        ida_remove(&mnt_id_ida, id);
        if (mnt_id_start > id)
                mnt_id_start = id;
        spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
        int res;

        if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
                return -ENOMEM;

        res = ida_get_new_above(&mnt_group_ida,
                                mnt_group_start,
                                &mnt->mnt_group_id);
        if (!res)
                mnt_group_start = mnt->mnt_group_id + 1;

        return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
        int id = mnt->mnt_group_id;
        ida_remove(&mnt_group_ida, id);
        if (mnt_group_start > id)
                mnt_group_start = id;
        mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
        this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
        preempt_disable();
        mnt->mnt_count += n;
        preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
        unsigned int count = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
        }

        return count;
#else
        return mnt->mnt_count;
#endif
}

static void drop_mountpoint(struct fs_pin *p)
{
        struct mount *m = container_of(p, struct mount, mnt_umount);
        dput(m->mnt_ex_mountpoint);
        pin_remove(p);
        mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
        struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
        if (mnt) {
                int err;

                err = mnt_alloc_id(mnt);
                if (err)
                        goto out_free_cache;

                if (name) {
                        mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
                        if (!mnt->mnt_devname)
                                goto out_free_id;
                }

#ifdef CONFIG_SMP
                mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
                if (!mnt->mnt_pcp)
                        goto out_free_devname;

                this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
                mnt->mnt_count = 1;
                mnt->mnt_writers = 0;
#endif

                INIT_HLIST_NODE(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
                INIT_LIST_HEAD(&mnt->mnt_list);
                INIT_LIST_HEAD(&mnt->mnt_expire);
                INIT_LIST_HEAD(&mnt->mnt_share);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
                INIT_LIST_HEAD(&mnt->mnt_slave);
                INIT_HLIST_NODE(&mnt->mnt_mp_list);
                INIT_LIST_HEAD(&mnt->mnt_umounting);
                init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
        }
        return mnt;

#ifdef CONFIG_SMP
out_free_devname:
        kfree_const(mnt->mnt_devname);
#endif
out_free_id:
        mnt_free_id(mnt);
out_free_cache:
        kmem_cache_free(mnt_cache, mnt);
        return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
        if (mnt->mnt_flags & MNT_READONLY)
                return 1;
        if (sb_rdonly(mnt->mnt_sb))
                return 1;
        return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
        this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
        mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
        this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
        mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
        unsigned int count = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
        }

        return count;
#else
        return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
        if (mnt->mnt_sb->s_readonly_remount)
                return 1;
        /* Order wrt setting s_flags/s_readonly_remount in do_remount() */
        smp_rmb();
        return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (the mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
        struct mount *mnt = real_mount(m);
        int ret = 0;

        preempt_disable();
        mnt_inc_writers(mnt);
        /*
         * The store to mnt_inc_writers must be visible before we pass
         * MNT_WRITE_HOLD loop below, so that the slowpath can see our
         * incremented count after it has set MNT_WRITE_HOLD.
         */
        smp_mb();
        while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
                cpu_relax();
        /*
         * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
         * be set to match its requirements. So we must not load that until
         * MNT_WRITE_HOLD is cleared.
         */
        smp_rmb();
        if (mnt_is_readonly(m)) {
                mnt_dec_writers(mnt);
                ret = -EROFS;
        }
        preempt_enable();

        return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
        int ret;

        sb_start_write(m->mnt_sb);
        ret = __mnt_want_write(m);
        if (ret)
                sb_end_write(m->mnt_sb);
        return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
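
/*
 * Typical calling pattern (a sketch, not code from this file): callers
 * bracket the write operation with mnt_want_write()/mnt_drop_write():
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = do_write_op(path);	(placeholder for the real operation,
 *					 e.g. vfs_unlink() or vfs_mkdir())
 *	mnt_drop_write(path->mnt);
 *	return err;
 */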

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
        /* superblock may be r/o */
        if (__mnt_is_readonly(mnt))
                return -EROFS;
        preempt_disable();
        mnt_inc_writers(real_mount(mnt));
        preempt_enable();
        return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
        if (!(file->f_mode & FMODE_WRITER))
                return __mnt_want_write(file->f_path.mnt);
        else
                return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
        int ret;

        sb_start_write(file_inode(file)->i_sb);
        ret = __mnt_want_write_file(file);
        if (ret)
                sb_end_write(file_inode(file)->i_sb);
        return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
        preempt_disable();
        mnt_dec_writers(real_mount(mnt));
        preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
        __mnt_drop_write(mnt);
        sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
        __mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
        __mnt_drop_write_file(file);
        sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

static int mnt_make_readonly(struct mount *mnt)
{
        int ret = 0;

        lock_mount_hash();
        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
        /*
         * After storing MNT_WRITE_HOLD, we'll read the counters. This store
         * should be visible before we do.
         */
        smp_mb();

        /*
         * With writers on hold, if this value is zero, then there are
         * definitely no active writers (although held writers may subsequently
         * increment the count, they'll have to wait, and decrement it after
         * seeing MNT_READONLY).
         *
         * It is OK to have counter incremented on one CPU and decremented on
         * another: the sum will add up correctly. The danger would be when we
         * sum up each counter, if we read a counter before it is incremented,
         * but then read another CPU's count which it has been subsequently
         * decremented from -- we would see more decrements than we should.
         * MNT_WRITE_HOLD protects against this scenario, because
         * mnt_want_write first increments count, then smp_mb, then spins on
         * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
         * we're counting up here.
         */
        if (mnt_get_writers(mnt) > 0)
                ret = -EBUSY;
        else
                mnt->mnt.mnt_flags |= MNT_READONLY;
        /*
         * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
         * that become unheld will see MNT_READONLY.
         */
        smp_wmb();
        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
        unlock_mount_hash();
        return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
        lock_mount_hash();
        mnt->mnt.mnt_flags &= ~MNT_READONLY;
        unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
        struct mount *mnt;
        int err = 0;

        /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
        if (atomic_long_read(&sb->s_remove_count))
                return -EBUSY;

        lock_mount_hash();
        list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
                if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
                        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
                        smp_mb();
                        if (mnt_get_writers(mnt) > 0) {
                                err = -EBUSY;
                                break;
                        }
                }
        }
        if (!err && atomic_long_read(&sb->s_remove_count))
                err = -EBUSY;

        if (!err) {
                sb->s_readonly_remount = 1;
                smp_wmb();
        }
        list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
                if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
                        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
        }
        unlock_mount_hash();

        return err;
}

static void free_vfsmnt(struct mount *mnt)
{
        kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
        free_percpu(mnt->mnt_pcp);
#endif
        kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
        free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
        struct mount *mnt;
        if (read_seqretry(&mount_lock, seq))
                return 1;
        if (bastard == NULL)
                return 0;
        mnt = real_mount(bastard);
        mnt_add_count(mnt, 1);
        smp_mb();	// see mntput_no_expire()
        if (likely(!read_seqretry(&mount_lock, seq)))
                return 0;
        if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
                mnt_add_count(mnt, -1);
                return 1;
        }
        lock_mount_hash();
        if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
                mnt_add_count(mnt, -1);
                unlock_mount_hash();
                return 1;
        }
        unlock_mount_hash();
        /* caller will mntput() */
        return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
        int res = __legitimize_mnt(bastard, seq);
        if (likely(!res))
                return true;
        if (unlikely(res < 0)) {
                rcu_read_unlock();
                mntput(bastard);
                rcu_read_lock();
        }
        return false;
}
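
/*
 * Expected calling pattern (a sketch; lookup_mnt() below is the canonical
 * in-tree user): sample the mount_lock sequence count, do the lockless
 * lookup, then retry until legitimize_mnt() confirms the result:
 *
 *	rcu_read_lock();
 *	do {
 *		seq = read_seqbegin(&mount_lock);
 *		mnt = ...lockless lookup...;
 *	} while (!legitimize_mnt(mnt, seq));
 *	rcu_read_unlock();
 */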

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
        struct hlist_head *head = m_hash(mnt, dentry);
        struct mount *p;

        hlist_for_each_entry_rcu(p, head, mnt_hash)
                if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
                        return p;
        return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
        struct mount *child_mnt;
        struct vfsmount *m;
        unsigned seq;

        rcu_read_lock();
        do {
                seq = read_seqbegin(&mount_lock);
                child_mnt = __lookup_mnt(path->mnt, path->dentry);
                m = child_mnt ? &child_mnt->mnt : NULL;
        } while (!legitimize_mnt(m, seq));
        rcu_read_unlock();
        return m;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
        struct mnt_namespace *ns = current->nsproxy->mnt_ns;
        struct mount *mnt;
        bool is_covered = false;

        if (!d_mountpoint(dentry))
                goto out;

        down_read(&namespace_sem);
        list_for_each_entry(mnt, &ns->list, mnt_list) {
                is_covered = (mnt->mnt_mountpoint == dentry);
                if (is_covered)
                        break;
        }
        up_read(&namespace_sem);
out:
        return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
        struct hlist_head *chain = mp_hash(dentry);
        struct mountpoint *mp;

        hlist_for_each_entry(mp, chain, m_hash) {
                if (mp->m_dentry == dentry) {
                        /* might be worth a WARN_ON() */
                        if (d_unlinked(dentry))
                                return ERR_PTR(-ENOENT);
                        mp->m_count++;
                        return mp;
                }
        }
        return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
        struct mountpoint *mp, *new = NULL;
        int ret;

        if (d_mountpoint(dentry)) {
mountpoint:
                read_seqlock_excl(&mount_lock);
                mp = lookup_mountpoint(dentry);
                read_sequnlock_excl(&mount_lock);
                if (mp)
                        goto done;
        }

        if (!new)
                new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        /* Exactly one process may set d_mounted */
        ret = d_set_mounted(dentry);

        /* Someone else set d_mounted? */
        if (ret == -EBUSY)
                goto mountpoint;

        /* The dentry is not available as a mountpoint? */
        mp = ERR_PTR(ret);
        if (ret)
                goto done;

        /* Add the new mountpoint to the hash table */
        read_seqlock_excl(&mount_lock);
        new->m_dentry = dentry;
        new->m_count = 1;
        hlist_add_head(&new->m_hash, mp_hash(dentry));
        INIT_HLIST_HEAD(&new->m_list);
        read_sequnlock_excl(&mount_lock);

        mp = new;
        new = NULL;
done:
        kfree(new);
        return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
        if (!--mp->m_count) {
                struct dentry *dentry = mp->m_dentry;
                BUG_ON(!hlist_empty(&mp->m_list));
                spin_lock(&dentry->d_lock);
                dentry->d_flags &= ~DCACHE_MOUNTED;
                spin_unlock(&dentry->d_lock);
                hlist_del(&mp->m_hash);
                kfree(mp);
        }
}

static inline int check_mnt(struct mount *mnt)
{
        return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
        if (ns) {
                ns->event = ++event;
                wake_up_interruptible(&ns->poll);
        }
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
        if (ns && ns->event != event) {
                ns->event = event;
                wake_up_interruptible(&ns->poll);
        }
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
        mnt->mnt_parent = mnt;
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        list_del_init(&mnt->mnt_child);
        hlist_del_init_rcu(&mnt->mnt_hash);
        hlist_del_init(&mnt->mnt_mp_list);
        put_mountpoint(mnt->mnt_mp);
        mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
        old_path->dentry = mnt->mnt_mountpoint;
        old_path->mnt = &mnt->mnt_parent->mnt;
        unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
        /* old mountpoint will be dropped when we can do that */
        mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
        unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
                        struct mountpoint *mp,
                        struct mount *child_mnt)
{
        mp->m_count++;
        mnt_add_count(mnt, 1);	/* essentially, that's mntget */
        child_mnt->mnt_mountpoint = dget(mp->m_dentry);
        child_mnt->mnt_parent = mnt;
        child_mnt->mnt_mp = mp;
        hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
        hlist_add_head_rcu(&mnt->mnt_hash,
                           m_hash(&parent->mnt, mnt->mnt_mountpoint));
        list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
                       struct mount *parent,
                       struct mountpoint *mp)
{
        mnt_set_mountpoint(parent, mp, mnt);
        __attach_mnt(mnt, parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
        struct mountpoint *old_mp = mnt->mnt_mp;
        struct dentry *old_mountpoint = mnt->mnt_mountpoint;
        struct mount *old_parent = mnt->mnt_parent;

        list_del_init(&mnt->mnt_child);
        hlist_del_init(&mnt->mnt_mp_list);
        hlist_del_init_rcu(&mnt->mnt_hash);

        attach_mnt(mnt, parent, mp);

        put_mountpoint(old_mp);

        /*
         * Safely avoid even the suggestion this code might sleep or
         * lock the mount hash by taking advantage of the knowledge that
         * mnt_change_mountpoint will not release the final reference
         * to a mountpoint.
         *
         * During mounting, the mount passed in as the parent mount will
         * continue to use the old mountpoint and during unmounting, the
         * old mountpoint will continue to exist until namespace_unlock,
         * which happens well after mnt_change_mountpoint.
         */
        spin_lock(&old_mountpoint->d_lock);
        old_mountpoint->d_lockref.count--;
        spin_unlock(&old_mountpoint->d_lock);

        mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;
        LIST_HEAD(head);
        struct mnt_namespace *n = parent->mnt_ns;

        BUG_ON(parent == mnt);

        list_add_tail(&head, &mnt->mnt_list);
        list_for_each_entry(m, &head, mnt_list)
                m->mnt_ns = n;

        list_splice(&head, n->list.prev);

        n->mounts += n->pending_mounts;
        n->pending_mounts = 0;

        __attach_mnt(mnt, parent);
        touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
        struct list_head *next = p->mnt_mounts.next;
        if (next == &p->mnt_mounts) {
                while (1) {
                        if (p == root)
                                return NULL;
                        next = p->mnt_child.next;
                        if (next != &p->mnt_parent->mnt_mounts)
                                break;
                        p = p->mnt_parent;
                }
        }
        return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
        struct list_head *prev = p->mnt_mounts.prev;
        while (prev != &p->mnt_mounts) {
                p = list_entry(prev, struct mount, mnt_child);
                prev = p->mnt_mounts.prev;
        }
        return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct mount *mnt;
        struct dentry *root;

        if (!type)
                return ERR_PTR(-ENODEV);

        mnt = alloc_vfsmnt(name);
        if (!mnt)
                return ERR_PTR(-ENOMEM);

        if (flags & SB_KERNMOUNT)
                mnt->mnt.mnt_flags = MNT_INTERNAL;

        root = mount_fs(type, flags, name, data);
        if (IS_ERR(root)) {
                mnt_free_id(mnt);
                free_vfsmnt(mnt);
                return ERR_CAST(root);
        }

        mnt->mnt.mnt_root = root;
        mnt->mnt.mnt_sb = root->d_sb;
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        mnt->mnt_parent = mnt;
        lock_mount_hash();
        list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
        unlock_mount_hash();
        return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
             const char *name, void *data)
{
        /* Until it is worked out how to pass the user namespace
         * through from the parent mount to the submount, don't support
         * unprivileged mounts with submounts.
         */
        if (mountpoint->d_sb->s_user_ns != &init_user_ns)
                return ERR_PTR(-EPERM);

        return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                               int flag)
{
        struct super_block *sb = old->mnt.mnt_sb;
        struct mount *mnt;
        int err;

        mnt = alloc_vfsmnt(old->mnt_devname);
        if (!mnt)
                return ERR_PTR(-ENOMEM);

        if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
                mnt->mnt_group_id = 0; /* not a peer of original */
        else
                mnt->mnt_group_id = old->mnt_group_id;

        if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
                err = mnt_alloc_group_id(mnt);
                if (err)
                        goto out_free;
        }

        mnt->mnt.mnt_flags = old->mnt.mnt_flags;
        mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
        /* Don't allow unprivileged users to change mount flags */
        if (flag & CL_UNPRIVILEGED) {
                mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

                if (mnt->mnt.mnt_flags & MNT_READONLY)
                        mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

                if (mnt->mnt.mnt_flags & MNT_NODEV)
                        mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

                if (mnt->mnt.mnt_flags & MNT_NOSUID)
                        mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

                if (mnt->mnt.mnt_flags & MNT_NOEXEC)
                        mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
        }

        /* Don't allow unprivileged users to reveal what is under a mount */
        if ((flag & CL_UNPRIVILEGED) &&
            (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
                mnt->mnt.mnt_flags |= MNT_LOCKED;

        atomic_inc(&sb->s_active);
        mnt->mnt.mnt_sb = sb;
        mnt->mnt.mnt_root = dget(root);
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        mnt->mnt_parent = mnt;
        lock_mount_hash();
        list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
        unlock_mount_hash();

        if ((flag & CL_SLAVE) ||
            ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
                list_add(&mnt->mnt_slave, &old->mnt_slave_list);
                mnt->mnt_master = old;
                CLEAR_MNT_SHARED(mnt);
        } else if (!(flag & CL_PRIVATE)) {
                if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
                        list_add(&mnt->mnt_share, &old->mnt_share);
                if (IS_MNT_SLAVE(old))
                        list_add(&mnt->mnt_slave, &old->mnt_slave);
                mnt->mnt_master = old->mnt_master;
        } else {
                CLEAR_MNT_SHARED(mnt);
        }
        if (flag & CL_MAKE_SHARED)
                set_mnt_shared(mnt);

        /* stick the duplicate mount on the same expiry list
         * as the original if that was on one */
        if (flag & CL_EXPIRE) {
                if (!list_empty(&old->mnt_expire))
                        list_add(&mnt->mnt_expire, &old->mnt_expire);
        }

        return mnt;

out_free:
        mnt_free_id(mnt);
        free_vfsmnt(mnt);
        return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
        /*
         * This probably indicates that somebody messed
         * up a mnt_want/drop_write() pair. If this
         * happens, the filesystem was probably unable
         * to make r/w->r/o transitions.
         */
        /*
         * The locking used to deal with mnt_count decrement provides barriers,
         * so mnt_get_writers() below is safe.
         */
        WARN_ON(mnt_get_writers(mnt));
        if (unlikely(mnt->mnt_pins.first))
                mnt_pin_kill(mnt);
        fsnotify_vfsmount_delete(&mnt->mnt);
        dput(mnt->mnt.mnt_root);
        deactivate_super(mnt->mnt.mnt_sb);
        mnt_free_id(mnt);
        call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
        cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
        struct llist_node *node = llist_del_all(&delayed_mntput_list);
        struct mount *m, *t;

        llist_for_each_entry_safe(m, t, node, mnt_llist)
                cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
        rcu_read_lock();
        if (likely(READ_ONCE(mnt->mnt_ns))) {
                /*
                 * Since we don't do lock_mount_hash() here,
                 * ->mnt_ns can change under us. However, if it's
                 * non-NULL, then there's a reference that won't
                 * be dropped until after an RCU delay done after
                 * turning ->mnt_ns NULL. So if we observe it
                 * non-NULL under rcu_read_lock(), the reference
                 * we are dropping is not the final one.
                 */
                mnt_add_count(mnt, -1);
                rcu_read_unlock();
                return;
        }
        lock_mount_hash();
        /*
         * make sure that if __legitimize_mnt() has not seen us grab
         * mount_lock, we'll see their refcount increment here.
         */
        smp_mb();
        mnt_add_count(mnt, -1);
        if (mnt_get_count(mnt)) {
                rcu_read_unlock();
                unlock_mount_hash();
                return;
        }
        if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
                rcu_read_unlock();
                unlock_mount_hash();
                return;
        }
        mnt->mnt.mnt_flags |= MNT_DOOMED;
        rcu_read_unlock();

        list_del(&mnt->mnt_instance);

        if (unlikely(!list_empty(&mnt->mnt_mounts))) {
                struct mount *p, *tmp;
                list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
                        umount_mnt(p);
                }
        }
        unlock_mount_hash();

        if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
                struct task_struct *task = current;
                if (likely(!(task->flags & PF_KTHREAD))) {
                        init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
                        if (!task_work_add(task, &mnt->mnt_rcu, true))
                                return;
                }
                if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
                        schedule_delayed_work(&delayed_mntput_work, 1);
                return;
        }
        cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
        if (mnt) {
                struct mount *m = real_mount(mnt);
                /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
                if (unlikely(m->mnt_expiry_mark))
                        m->mnt_expiry_mark = 0;
                mntput_no_expire(m);
        }
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
        if (mnt)
                mnt_add_count(real_mount(mnt), 1);
        return mnt;
}
EXPORT_SYMBOL(mntget);

/* path_is_mountpoint() - Check if path is a mount in the current
 *                        namespace.
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
        unsigned seq;
        bool res;

        if (!d_mountpoint(path->dentry))
                return false;

        rcu_read_lock();
        do {
                seq = read_seqbegin(&mount_lock);
                res = __path_is_mountpoint(path);
        } while (read_seqretry(&mount_lock, seq));
        rcu_read_unlock();

        return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
        struct mount *p;
        p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
        if (IS_ERR(p))
                return ERR_CAST(p);
        p->mnt.mnt_flags |= MNT_INTERNAL;
        return &p->mnt;
}

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_mounts *p = m->private;

        down_read(&namespace_sem);
        if (p->cached_event == p->ns->event) {
                void *v = p->cached_mount;
                if (*pos == p->cached_index)
                        return v;
                if (*pos == p->cached_index + 1) {
                        v = seq_list_next(v, &p->ns->list, &p->cached_index);
                        return p->cached_mount = v;
                }
        }

        p->cached_event = p->ns->event;
        p->cached_mount = seq_list_start(&p->ns->list, *pos);
        p->cached_index = *pos;
        return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_mounts *p = m->private;

        p->cached_mount = seq_list_next(v, &p->ns->list, pos);
        p->cached_index = *pos;
        return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
        up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
        struct proc_mounts *p = m->private;
        struct mount *r = list_entry(v, struct mount, mnt_list);
        return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = m_show,
};
#endif /* CONFIG_PROC_FS */
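
/*
 * For context (not part of this file): these seq_file operations back the
 * per-process /proc/<pid>/mounts, mountinfo and mountstats files; the
 * ->show() callback stashed in struct proc_mounts is supplied by
 * fs/proc_namespace.c.
 */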

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
        struct mount *mnt = real_mount(m);
        int actual_refs = 0;
        int minimum_refs = 0;
        struct mount *p;
        BUG_ON(!m);

        /* write lock needed for mnt_get_count */
        lock_mount_hash();
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                actual_refs += mnt_get_count(p);
                minimum_refs += 2;
        }
        unlock_mount_hash();

        if (actual_refs > minimum_refs)
                return 0;

        return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
        int ret = 1;
        down_read(&namespace_sem);
        lock_mount_hash();
        if (propagate_mount_busy(real_mount(mnt), 2))
                ret = 0;
        unlock_mount_hash();
        up_read(&namespace_sem);
        return ret;
}

EXPORT_SYMBOL(may_umount);

static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
        struct hlist_head head;

        hlist_move_list(&unmounted, &head);

        up_write(&namespace_sem);

        if (likely(hlist_empty(&head)))
                return;

        synchronize_rcu();

        group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
        down_write(&namespace_sem);
}

enum umount_tree_flags {
        UMOUNT_SYNC = 1,
        UMOUNT_PROPAGATE = 2,
        UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
        /* Leaving mounts connected is only valid for lazy umounts */
        if (how & UMOUNT_SYNC)
                return true;

        /* A mount without a parent has nothing to be connected to */
        if (!mnt_has_parent(mnt))
                return true;

        /* Because the reference counting rules change when mounts are
         * unmounted and connected, umounted mounts may not be
         * connected to mounted mounts.
         */
        if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
                return true;

        /* Has it been requested that the mount remain connected? */
        if (how & UMOUNT_CONNECTED)
                return false;

        /* Is the mount locked such that it needs to remain connected? */
        if (IS_MNT_LOCKED(mnt))
                return false;

        /* By default disconnect the mount */
        return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
        LIST_HEAD(tmp_list);
        struct mount *p;

        if (how & UMOUNT_PROPAGATE)
                propagate_mount_unlock(mnt);

        /* Gather the mounts to umount */
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                p->mnt.mnt_flags |= MNT_UMOUNT;
                list_move(&p->mnt_list, &tmp_list);
        }

        /* Hide the mounts from mnt_mounts */
        list_for_each_entry(p, &tmp_list, mnt_list) {
                list_del_init(&p->mnt_child);
        }

        /* Add propagated mounts to the tmp_list */
        if (how & UMOUNT_PROPAGATE)
                propagate_umount(&tmp_list);

        while (!list_empty(&tmp_list)) {
                struct mnt_namespace *ns;
                bool disconnect;
                p = list_first_entry(&tmp_list, struct mount, mnt_list);
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                ns = p->mnt_ns;
                if (ns) {
                        ns->mounts--;
                        __touch_mnt_namespace(ns);
                }
                p->mnt_ns = NULL;
                if (how & UMOUNT_SYNC)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

                disconnect = disconnect_mount(p, how);

                pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
                                 disconnect ? &unmounted : NULL);
                if (mnt_has_parent(p)) {
                        mnt_add_count(p->mnt_parent, -1);
                        if (!disconnect) {
                                /* Don't forget about p */
                                list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
                        } else {
                                umount_mnt(p);
                        }
                }
                change_mnt_propagation(p, MS_PRIVATE);
        }
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
        struct super_block *sb = mnt->mnt.mnt_sb;
        int retval;

        retval = security_sb_umount(&mnt->mnt, flags);
        if (retval)
                return retval;

        /*
         * Allow userspace to request a mountpoint be expired rather than
         * unmounting unconditionally. Unmount only happens if:
         *  (1) the mark is already set (the mark is cleared by mntput())
         *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
         */
        if (flags & MNT_EXPIRE) {
                if (&mnt->mnt == current->fs->root.mnt ||
                    flags & (MNT_FORCE | MNT_DETACH))
                        return -EINVAL;

                /*
                 * probably don't strictly need the lock here if we examined
                 * all race cases, but it's a slowpath.
                 */
                lock_mount_hash();
                if (mnt_get_count(mnt) != 2) {
                        unlock_mount_hash();
                        return -EBUSY;
                }
                unlock_mount_hash();

                if (!xchg(&mnt->mnt_expiry_mark, 1))
                        return -EAGAIN;
        }

        /*
         * If we may have to abort operations to get out of this
         * mount, and they will themselves hold resources we must
         * allow the fs to do things. In the Unix tradition of
         * 'Gee thats tricky lets do it in userspace' the umount_begin
         * might fail to complete on the first run through as other tasks
         * must return, and the like. That's for the mount program to worry
         * about for the moment.
         */

        if (flags & MNT_FORCE && sb->s_op->umount_begin) {
                sb->s_op->umount_begin(sb);
        }

        /*
         * No sense to grab the lock for this test, but test itself looks
         * somewhat bogus. Suggestions for better replacement?
         * Ho-hum... In principle, we might treat that as umount + switch
         * to rootfs. GC would eventually take care of the old vfsmount.
         * Actually it makes sense, especially if rootfs would contain a
         * /reboot - static binary that would close all descriptors and
         * call reboot(2). Then init(8) could umount root and exec /reboot.
         */
        if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
                /*
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
                 */
                if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
                        return -EPERM;
                down_write(&sb->s_umount);
                if (!sb_rdonly(sb))
                        retval = do_remount_sb(sb, SB_RDONLY, NULL, 0);
                up_write(&sb->s_umount);
                return retval;
        }

        namespace_lock();
        lock_mount_hash();
        event++;

        if (flags & MNT_DETACH) {
                if (!list_empty(&mnt->mnt_list))
                        umount_tree(mnt, UMOUNT_PROPAGATE);
                retval = 0;
        } else {
                shrink_submounts(mnt);
                retval = -EBUSY;
                if (!propagate_mount_busy(mnt, 2)) {
                        if (!list_empty(&mnt->mnt_list))
                                umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
                        retval = 0;
                }
        }
        unlock_mount_hash();
        namespace_unlock();
        return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
        struct mountpoint *mp;
        struct mount *mnt;

        namespace_lock();
        lock_mount_hash();
        mp = lookup_mountpoint(dentry);
        if (IS_ERR_OR_NULL(mp))
                goto out_unlock;

        event++;
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
                        hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
                        umount_mnt(mnt);
                }
                else umount_tree(mnt, UMOUNT_CONNECTED);
        }
        put_mountpoint(mp);
out_unlock:
        unlock_mount_hash();
        namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
        return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static inline bool may_mandlock(void)
{
#ifndef CONFIG_MANDATORY_FILE_LOCKING
        return false;
#endif
        return capable(CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

int ksys_umount(char __user *name, int flags)
{
        struct path path;
        struct mount *mnt;
        int retval;
        int lookup_flags = 0;

        if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
                return -EINVAL;

        if (!may_mount())
                return -EPERM;

        if (!(flags & UMOUNT_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;

        retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
        if (retval)
                goto out;
        mnt = real_mount(path.mnt);
        retval = -EINVAL;
        if (path.dentry != path.mnt->mnt_root)
                goto dput_and_out;
        if (!check_mnt(mnt))
                goto dput_and_out;
        if (mnt->mnt.mnt_flags & MNT_LOCKED)
                goto dput_and_out;
        retval = -EPERM;
        if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
                goto dput_and_out;

        retval = do_umount(mnt, flags);
dput_and_out:
        /* we mustn't call path_put() as that would clear mnt_expiry_mark */
        dput(path.dentry);
        mntput_no_expire(mnt);
out:
        return retval;
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
        return ksys_umount(name, flags);
}
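
/*
 * From userspace this is reached via umount(2)/umount2(2); e.g.
 * (illustrative) umount2("/mnt", MNT_DETACH) requests the lazy unmount
 * handled by the MNT_DETACH branch in do_umount() above.
 */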

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
        return ksys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
        /* Is this a proxy for a mount namespace? */
        return dentry->d_op == &ns_dentry_operations &&
               dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
        return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
        /* Could bind mounting the mount namespace inode cause a
         * mount namespace loop?
         */
        struct mnt_namespace *mnt_ns;
        if (!is_mnt_ns_file(dentry))
                return false;

        mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
        return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                        int flag)
{
        struct mount *res, *p, *q, *r, *parent;

        if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
                return ERR_PTR(-EINVAL);

        if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
                return ERR_PTR(-EINVAL);

        res = q = clone_mnt(mnt, dentry, flag);
        if (IS_ERR(q))
                return q;

        q->mnt_mountpoint = mnt->mnt_mountpoint;

        p = mnt;
        list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
                struct mount *s;
                if (!is_subdir(r->mnt_mountpoint, dentry))
                        continue;

                for (s = r; s; s = next_mnt(s, r)) {
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
                                s = skip_mnt_tree(s);
                                continue;
                        }
                        if (!(flag & CL_COPY_MNT_NS_FILE) &&
                            is_mnt_ns_file(s->mnt.mnt_root)) {
                                s = skip_mnt_tree(s);
                                continue;
                        }
                        while (p != s->mnt_parent) {
                                p = p->mnt_parent;
                                q = q->mnt_parent;
                        }
                        p = s;
                        parent = q;
                        q = clone_mnt(p, p->mnt.mnt_root, flag);
                        if (IS_ERR(q))
                                goto out;
                        lock_mount_hash();
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, parent, p->mnt_mp);
                        unlock_mount_hash();
                }
        }
        return res;
out:
        if (res) {
                lock_mount_hash();
                umount_tree(res, UMOUNT_SYNC);
                unlock_mount_hash();
        }
        return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
        struct mount *tree;
        namespace_lock();
        if (!check_mnt(real_mount(path->mnt)))
                tree = ERR_PTR(-EINVAL);
        else
                tree = copy_tree(real_mount(path->mnt), path->dentry,
                                 CL_COPY_ALL | CL_PRIVATE);
        namespace_unlock();
        if (IS_ERR(tree))
                return ERR_CAST(tree);
        return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
        namespace_lock();
        lock_mount_hash();
        umount_tree(real_mount(mnt), UMOUNT_SYNC);
        unlock_mount_hash();
        namespace_unlock();
}

/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path. The new
 * mount will not be attached anywhere in the namespace and will be private
 * (i.e. changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
        struct mount *old_mnt = real_mount(path->mnt);
        struct mount *new_mnt;

        if (IS_MNT_UNBINDABLE(old_mnt))
                return ERR_PTR(-EINVAL);

        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);

        return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
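
/*
 * An in-tree user of this (for illustration, not from this file) is
 * overlayfs, which makes private clones of its lower and upper layer
 * mounts so that later mount-table changes to the originals do not
 * propagate into the overlay.
 */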

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
                   struct vfsmount *root)
{
        struct mount *mnt;
        int res = f(root, arg);
        if (res)
                return res;
        list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
                res = f(&mnt->mnt, arg);
                if (res)
                        return res;
        }
        return 0;
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
        struct mount *p;

        for (p = mnt; p != end; p = next_mnt(p, mnt)) {
                if (p->mnt_group_id && !IS_MNT_SHARED(p))
                        mnt_release_group_id(p);
        }
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
        struct mount *p;

        for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
                if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
                        int err = mnt_alloc_group_id(p);
                        if (err) {
                                cleanup_group_ids(mnt, p);
                                return err;
                        }
                }
        }

        return 0;
}

int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
        unsigned int max = READ_ONCE(sysctl_mount_max);
        unsigned int mounts = 0, old, pending, sum;
        struct mount *p;

        for (p = mnt; p; p = next_mnt(p, mnt))
                mounts++;

        old = ns->mounts;
        pending = ns->pending_mounts;
        sum = old + pending;
        if ((old > sum) ||
            (pending > sum) ||
            (max < sum) ||
            (mounts > (max - sum)))
                return -ENOSPC;

        ns->pending_mounts = pending + mounts;
        return 0;
}
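
/*
 * Worked example (illustrative): with sysctl_mount_max at its default of
 * 100000, ns->mounts == 99990 and ns->pending_mounts == 5, attaching a
 * tree of 10 mounts fails: sum == 99995 and 10 > 100000 - 99995. The
 * first two comparisons only reject arithmetic overflow of "sum".
 */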

/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |                        BIND MOUNT OPERATION                             |
 * |**************************************************************************
 * | source-->| shared       |  private      |  slave        | unbindable    |
 * | dest     |              |               |               |               |
 * |   |      |              |               |               |               |
 * |   v      |              |               |               |               |
 * |**************************************************************************
 * |  shared  | shared (++)  |  shared (+)   |  shared (+++) |  invalid      |
 * |          |              |               |               |               |
 * |non-shared| shared (+)   |  private      |  slave (*)    |  invalid      |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                        MOVE MOUNT OPERATION                             |
 * |**************************************************************************
 * | source-->| shared       |  private      |  slave        | unbindable    |
 * | dest     |              |               |               |               |
 * |   |      |              |               |               |               |
 * |   v      |              |               |               |               |
 * |**************************************************************************
 * |  shared  | shared (+)   |  shared (+)   |  shared (+++) |  invalid      |
 * |          |              |               |               |               |
 * |non-shared| shared (+*)  |  private      |  slave (*)    |  unbindable   |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
1968 static int attach_recursive_mnt(struct mount *source_mnt,
1969 struct mount *dest_mnt,
1970 struct mountpoint *dest_mp,
1971 struct path *parent_path)
1972 {
1973 HLIST_HEAD(tree_list);
1974 struct mnt_namespace *ns = dest_mnt->mnt_ns;
1975 struct mountpoint *smp;
1976 struct mount *child, *p;
1977 struct hlist_node *n;
1978 int err;
1979
1980 /* Preallocate a mountpoint in case the new mounts need
1981 * to be tucked under other mounts.
1982 */
1983 smp = get_mountpoint(source_mnt->mnt.mnt_root);
1984 if (IS_ERR(smp))
1985 return PTR_ERR(smp);
1986
1987 /* Is there space to add these mounts to the mount namespace? */
1988 if (!parent_path) {
1989 err = count_mounts(ns, source_mnt);
1990 if (err)
1991 goto out;
1992 }
1993
1994 if (IS_MNT_SHARED(dest_mnt)) {
1995 err = invent_group_ids(source_mnt, true);
1996 if (err)
1997 goto out;
1998 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
1999 lock_mount_hash();
2000 if (err)
2001 goto out_cleanup_ids;
2002 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2003 set_mnt_shared(p);
2004 } else {
2005 lock_mount_hash();
2006 }
2007 if (parent_path) {
2008 detach_mnt(source_mnt, parent_path);
2009 attach_mnt(source_mnt, dest_mnt, dest_mp);
2010 touch_mnt_namespace(source_mnt->mnt_ns);
2011 } else {
2012 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2013 commit_tree(source_mnt);
2014 }
2015
2016 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2017 struct mount *q;
2018 hlist_del_init(&child->mnt_hash);
2019 q = __lookup_mnt(&child->mnt_parent->mnt,
2020 child->mnt_mountpoint);
2021 if (q)
2022 mnt_change_mountpoint(child, smp, q);
2023 commit_tree(child);
2024 }
2025 put_mountpoint(smp);
2026 unlock_mount_hash();
2027
2028 return 0;
2029
2030 out_cleanup_ids:
2031 while (!hlist_empty(&tree_list)) {
2032 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
2033 child->mnt_parent->mnt_ns->pending_mounts = 0;
2034 umount_tree(child, UMOUNT_SYNC);
2035 }
2036 unlock_mount_hash();
2037 cleanup_group_ids(source_mnt, NULL);
2038 out:
2039 ns->pending_mounts = 0;
2040
2041 read_seqlock_excl(&mount_lock);
2042 put_mountpoint(smp);
2043 read_sequnlock_excl(&mount_lock);
2044
2045 return err;
2046 }
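
/*
 * Illustration (added, not in the original file): a minimal userspace
 * sketch of the "shared dest, private source" cell of the BIND table
 * above -- binding a private source onto a shared destination creates a
 * clone that is itself marked shared (+) and propagated to the peers of
 * the destination.  All paths are hypothetical, and /mnt/dest is
 * assumed to already be a mount point.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Turn /mnt/dest into a shared mount (a peer group of one). */
	if (mount(NULL, "/mnt/dest", NULL, MS_SHARED, NULL) == -1)
		perror("make-shared");

	/* Bind a private source below it; per the table the clone is
	 * created shared and propagates to every peer of /mnt/dest. */
	if (mount("/mnt/src", "/mnt/dest/sub", NULL, MS_BIND, NULL) == -1)
		perror("bind");
	return 0;
}
#endif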
2047
2048 static struct mountpoint *lock_mount(struct path *path)
2049 {
2050 struct vfsmount *mnt;
2051 struct dentry *dentry = path->dentry;
2052 retry:
2053 inode_lock(dentry->d_inode);
2054 if (unlikely(cant_mount(dentry))) {
2055 inode_unlock(dentry->d_inode);
2056 return ERR_PTR(-ENOENT);
2057 }
2058 namespace_lock();
2059 mnt = lookup_mnt(path);
2060 if (likely(!mnt)) {
2061 struct mountpoint *mp = get_mountpoint(dentry);
2062 if (IS_ERR(mp)) {
2063 namespace_unlock();
2064 inode_unlock(dentry->d_inode);
2065 return mp;
2066 }
2067 return mp;
2068 }
2069 namespace_unlock();
2070 inode_unlock(path->dentry->d_inode);
2071 path_put(path);
2072 path->mnt = mnt;
2073 dentry = path->dentry = dget(mnt->mnt_root);
2074 goto retry;
2075 }
2076
2077 static void unlock_mount(struct mountpoint *where)
2078 {
2079 struct dentry *dentry = where->m_dentry;
2080
2081 read_seqlock_excl(&mount_lock);
2082 put_mountpoint(where);
2083 read_sequnlock_excl(&mount_lock);
2084
2085 namespace_unlock();
2086 inode_unlock(dentry->d_inode);
2087 }
2088
2089 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2090 {
2091 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2092 return -EINVAL;
2093
2094 if (d_is_dir(mp->m_dentry) !=
2095 d_is_dir(mnt->mnt.mnt_root))
2096 return -ENOTDIR;
2097
2098 return attach_recursive_mnt(mnt, p, mp, NULL);
2099 }
2100
2101 /*
2102 * Sanity check the flags to change_mnt_propagation.
2103 */
2104
2105 static int flags_to_propagation_type(int ms_flags)
2106 {
2107 int type = ms_flags & ~(MS_REC | MS_SILENT);
2108
2109 /* Fail if any non-propagation flags are set */
2110 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2111 return 0;
2112 /* Only one propagation flag should be set */
2113 if (!is_power_of_2(type))
2114 return 0;
2115 return type;
2116 }
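
/*
 * Illustration (added): the power-of-two test above is what rejects
 * requests that combine propagation types.  A userspace-style sketch of
 * the same check:
 */
#if 0
#include <assert.h>
#include <sys/mount.h>

static int single_flag(unsigned int x)
{
	return x && !(x & (x - 1));	/* same test as is_power_of_2() */
}

int main(void)
{
	assert(single_flag(MS_SHARED));			/* valid request  */
	assert(!single_flag(MS_SHARED | MS_SLAVE));	/* rejected combo */
	assert(!single_flag(0));			/* no type at all */
	return 0;
}
#endif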
2117
2118 /*
2119 * recursively change the type of the mountpoint.
2120 */
2121 static int do_change_type(struct path *path, int ms_flags)
2122 {
2123 struct mount *m;
2124 struct mount *mnt = real_mount(path->mnt);
2125 int recurse = ms_flags & MS_REC;
2126 int type;
2127 int err = 0;
2128
2129 if (path->dentry != path->mnt->mnt_root)
2130 return -EINVAL;
2131
2132 type = flags_to_propagation_type(ms_flags);
2133 if (!type)
2134 return -EINVAL;
2135
2136 namespace_lock();
2137 if (type == MS_SHARED) {
2138 err = invent_group_ids(mnt, recurse);
2139 if (err)
2140 goto out_unlock;
2141 }
2142
2143 lock_mount_hash();
2144 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2145 change_mnt_propagation(m, type);
2146 unlock_mount_hash();
2147
2148 out_unlock:
2149 namespace_unlock();
2150 return err;
2151 }
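
/*
 * Illustration (added): do_change_type() is what backs invocations such
 * as "mount --make-rprivate /".  A hedged userspace equivalent:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Recursively mark the whole tree private so that subsequent
	 * mounts and unmounts no longer propagate in or out. */
	if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL) == -1)
		perror("make-rprivate");
	return 0;
}
#endif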
2152
2153 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
2154 {
2155 struct mount *child;
2156 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2157 if (!is_subdir(child->mnt_mountpoint, dentry))
2158 continue;
2159
2160 if (child->mnt.mnt_flags & MNT_LOCKED)
2161 return true;
2162 }
2163 return false;
2164 }
2165
2166 /*
2167 * do loopback mount.
2168 */
2169 static int do_loopback(struct path *path, const char *old_name,
2170 int recurse)
2171 {
2172 struct path old_path;
2173 struct mount *mnt = NULL, *old, *parent;
2174 struct mountpoint *mp;
2175 int err;
2176 if (!old_name || !*old_name)
2177 return -EINVAL;
2178 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2179 if (err)
2180 return err;
2181
2182 err = -EINVAL;
2183 if (mnt_ns_loop(old_path.dentry))
2184 goto out;
2185
2186 mp = lock_mount(path);
2187 err = PTR_ERR(mp);
2188 if (IS_ERR(mp))
2189 goto out;
2190
2191 old = real_mount(old_path.mnt);
2192 parent = real_mount(path->mnt);
2193
2194 err = -EINVAL;
2195 if (IS_MNT_UNBINDABLE(old))
2196 goto out2;
2197
2198 if (!check_mnt(parent))
2199 goto out2;
2200
2201 if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
2202 goto out2;
2203
2204 if (!recurse && has_locked_children(old, old_path.dentry))
2205 goto out2;
2206
2207 if (recurse)
2208 mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
2209 else
2210 mnt = clone_mnt(old, old_path.dentry, 0);
2211
2212 if (IS_ERR(mnt)) {
2213 err = PTR_ERR(mnt);
2214 goto out2;
2215 }
2216
2217 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2218
2219 err = graft_tree(mnt, parent, mp);
2220 if (err) {
2221 lock_mount_hash();
2222 umount_tree(mnt, UMOUNT_SYNC);
2223 unlock_mount_hash();
2224 }
2225 out2:
2226 unlock_mount(mp);
2227 out:
2228 path_put(&old_path);
2229 return err;
2230 }
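
/*
 * Illustration (added): do_loopback() implements MS_BIND.  A userspace
 * sketch of a plain bind (the clone_mnt() path) and a recursive bind
 * (the copy_tree() path); the paths are made up.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* just the one mount */
	if (mount("/srv/data", "/mnt/a", NULL, MS_BIND, NULL) == -1)
		perror("bind");

	/* the mount and everything mounted below it */
	if (mount("/srv/data", "/mnt/b", NULL, MS_BIND | MS_REC, NULL) == -1)
		perror("rbind");
	return 0;
}
#endif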
2231
2232 static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
2233 {
2234 int error = 0;
2235 int readonly_request = 0;
2236
2237 if (ms_flags & MS_RDONLY)
2238 readonly_request = 1;
2239 if (readonly_request == __mnt_is_readonly(mnt))
2240 return 0;
2241
2242 if (readonly_request)
2243 error = mnt_make_readonly(real_mount(mnt));
2244 else
2245 __mnt_unmake_readonly(real_mount(mnt));
2246 return error;
2247 }
2248
2249 /*
2250 * change filesystem flags. dir should be the physical root of the filesystem.
2251 * If you've mounted a non-root directory somewhere and want to do remount
2252 * on it - tough luck.
2253 */
2254 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2255 int mnt_flags, void *data)
2256 {
2257 int err;
2258 struct super_block *sb = path->mnt->mnt_sb;
2259 struct mount *mnt = real_mount(path->mnt);
2260
2261 if (!check_mnt(mnt))
2262 return -EINVAL;
2263
2264 if (path->dentry != path->mnt->mnt_root)
2265 return -EINVAL;
2266
2267 /* Don't allow changing of locked mnt flags.
2268 *
2269 * No locks need to be held here while testing the various
2270 * MNT_LOCK flags because those flags can never be cleared
2271 * once they are set.
2272 */
2273 if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
2274 !(mnt_flags & MNT_READONLY)) {
2275 return -EPERM;
2276 }
2277 if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
2278 !(mnt_flags & MNT_NODEV)) {
2279 return -EPERM;
2280 }
2281 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
2282 !(mnt_flags & MNT_NOSUID)) {
2283 return -EPERM;
2284 }
2285 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
2286 !(mnt_flags & MNT_NOEXEC)) {
2287 return -EPERM;
2288 }
2289 if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
2290 ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
2291 return -EPERM;
2292 }
2293
2294 err = security_sb_remount(sb, data);
2295 if (err)
2296 return err;
2297
2298 down_write(&sb->s_umount);
2299 if (ms_flags & MS_BIND)
2300 err = change_mount_flags(path->mnt, ms_flags);
2301 else if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
2302 err = -EPERM;
2303 else
2304 err = do_remount_sb(sb, sb_flags, data, 0);
2305 if (!err) {
2306 lock_mount_hash();
2307 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2308 mnt->mnt.mnt_flags = mnt_flags;
2309 touch_mnt_namespace(mnt->mnt_ns);
2310 unlock_mount_hash();
2311 }
2312 up_write(&sb->s_umount);
2313 return err;
2314 }
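
/*
 * Illustration (added): the MS_REMOUNT|MS_BIND combination takes the
 * change_mount_flags() branch above and so only touches the vfsmount,
 * not the superblock.  A hedged userspace sketch; /mnt/a is made up.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Make this one bind mount read-only without remounting the
	 * underlying superblock (other mounts of it stay writable). */
	if (mount(NULL, "/mnt/a", NULL,
		  MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) == -1)
		perror("remount-ro");
	return 0;
}
#endif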
2315
2316 static inline int tree_contains_unbindable(struct mount *mnt)
2317 {
2318 struct mount *p;
2319 for (p = mnt; p; p = next_mnt(p, mnt)) {
2320 if (IS_MNT_UNBINDABLE(p))
2321 return 1;
2322 }
2323 return 0;
2324 }
2325
2326 static int do_move_mount(struct path *path, const char *old_name)
2327 {
2328 struct path old_path, parent_path;
2329 struct mount *p;
2330 struct mount *old;
2331 struct mountpoint *mp;
2332 int err;
2333 if (!old_name || !*old_name)
2334 return -EINVAL;
2335 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
2336 if (err)
2337 return err;
2338
2339 mp = lock_mount(path);
2340 err = PTR_ERR(mp);
2341 if (IS_ERR(mp))
2342 goto out;
2343
2344 old = real_mount(old_path.mnt);
2345 p = real_mount(path->mnt);
2346
2347 err = -EINVAL;
2348 if (!check_mnt(p) || !check_mnt(old))
2349 goto out1;
2350
2351 if (old->mnt.mnt_flags & MNT_LOCKED)
2352 goto out1;
2353
2354 err = -EINVAL;
2355 if (old_path.dentry != old_path.mnt->mnt_root)
2356 goto out1;
2357
2358 if (!mnt_has_parent(old))
2359 goto out1;
2360
2361 if (d_is_dir(path->dentry) !=
2362 d_is_dir(old_path.dentry))
2363 goto out1;
2364 /*
2365 * Don't move a mount residing in a shared parent.
2366 */
2367 if (IS_MNT_SHARED(old->mnt_parent))
2368 goto out1;
2369 /*
2370 * Don't move a mount tree containing unbindable mounts to a destination
2371 * mount which is shared.
2372 */
2373 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2374 goto out1;
2375 err = -ELOOP;
2376 for (; mnt_has_parent(p); p = p->mnt_parent)
2377 if (p == old)
2378 goto out1;
2379
2380 err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
2381 if (err)
2382 goto out1;
2383
2384 /* if the mount is moved, it should no longer expire
2385 * automatically */
2386 list_del_init(&old->mnt_expire);
2387 out1:
2388 unlock_mount(mp);
2389 out:
2390 if (!err)
2391 path_put(&parent_path);
2392 path_put(&old_path);
2393 return err;
2394 }
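
/*
 * Illustration (added): do_move_mount() backs MS_MOVE.  The source must
 * be the root of a mount and must not live under a shared parent, as
 * checked above.  Hypothetical userspace usage:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Atomically detach the mount at /mnt/old and reattach the
	 * whole subtree at /mnt/new. */
	if (mount("/mnt/old", "/mnt/new", NULL, MS_MOVE, NULL) == -1)
		perror("move");
	return 0;
}
#endif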
2395
2396 static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
2397 {
2398 int err;
2399 const char *subtype = strchr(fstype, '.');
2400 if (subtype) {
2401 subtype++;
2402 err = -EINVAL;
2403 if (!subtype[0])
2404 goto err;
2405 } else
2406 subtype = "";
2407
2408 mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
2409 err = -ENOMEM;
2410 if (!mnt->mnt_sb->s_subtype)
2411 goto err;
2412 return mnt;
2413
2414 err:
2415 mntput(mnt);
2416 return ERR_PTR(err);
2417 }
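
/*
 * Illustration (added): the subtype is the part of the fstype string
 * after the first dot, so a type of "fuse.sshfs" records "sshfs" in
 * sb->s_subtype (and shows up that way in /proc/mounts).  A hedged
 * sketch of the type string only -- a real FUSE mount is set up by
 * fusermount/sshfs, which also pass fd=... in the data page:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* hypothetical; would normally be issued by fusermount */
	if (mount("user@host:/", "/mnt/remote", "fuse.sshfs", 0, NULL) == -1)
		perror("mount fuse.sshfs");
	return 0;
}
#endif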
2418
2419 /*
2420 * add a mount into a namespace's mount tree
2421 */
2422 static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
2423 {
2424 struct mountpoint *mp;
2425 struct mount *parent;
2426 int err;
2427
2428 mnt_flags &= ~MNT_INTERNAL_FLAGS;
2429
2430 mp = lock_mount(path);
2431 if (IS_ERR(mp))
2432 return PTR_ERR(mp);
2433
2434 parent = real_mount(path->mnt);
2435 err = -EINVAL;
2436 if (unlikely(!check_mnt(parent))) {
2437 /* that's acceptable only for automounts done in private ns */
2438 if (!(mnt_flags & MNT_SHRINKABLE))
2439 goto unlock;
2440 /* ... and for those we'd better have mountpoint still alive */
2441 if (!parent->mnt_ns)
2442 goto unlock;
2443 }
2444
2445 /* Refuse the same filesystem on the same mount point */
2446 err = -EBUSY;
2447 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2448 path->mnt->mnt_root == path->dentry)
2449 goto unlock;
2450
2451 err = -EINVAL;
2452 if (d_is_symlink(newmnt->mnt.mnt_root))
2453 goto unlock;
2454
2455 newmnt->mnt.mnt_flags = mnt_flags;
2456 err = graft_tree(newmnt, parent, mp);
2457
2458 unlock:
2459 unlock_mount(mp);
2460 return err;
2461 }
2462
2463 static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags);
2464
2465 /*
2466 * create a new mount for userspace and request it to be added into the
2467 * namespace's tree
2468 */
2469 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
2470 int mnt_flags, const char *name, void *data)
2471 {
2472 struct file_system_type *type;
2473 struct vfsmount *mnt;
2474 int err;
2475
2476 if (!fstype)
2477 return -EINVAL;
2478
2479 type = get_fs_type(fstype);
2480 if (!type)
2481 return -ENODEV;
2482
2483 mnt = vfs_kern_mount(type, sb_flags, name, data);
2484 if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
2485 !mnt->mnt_sb->s_subtype)
2486 mnt = fs_set_subtype(mnt, fstype);
2487
2488 put_filesystem(type);
2489 if (IS_ERR(mnt))
2490 return PTR_ERR(mnt);
2491
2492 if (mount_too_revealing(mnt, &mnt_flags)) {
2493 mntput(mnt);
2494 return -EPERM;
2495 }
2496
2497 err = do_add_mount(real_mount(mnt), path, mnt_flags);
2498 if (err)
2499 mntput(mnt);
2500 return err;
2501 }
2502
2503 int finish_automount(struct vfsmount *m, struct path *path)
2504 {
2505 struct mount *mnt = real_mount(m);
2506 int err;
2507 /* The new mount record should have at least 2 refs to prevent it being
2508 * expired before we get a chance to add it
2509 */
2510 BUG_ON(mnt_get_count(mnt) < 2);
2511
2512 if (m->mnt_sb == path->mnt->mnt_sb &&
2513 m->mnt_root == path->dentry) {
2514 err = -ELOOP;
2515 goto fail;
2516 }
2517
2518 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2519 if (!err)
2520 return 0;
2521 fail:
2522 /* remove m from any expiration list it may be on */
2523 if (!list_empty(&mnt->mnt_expire)) {
2524 namespace_lock();
2525 list_del_init(&mnt->mnt_expire);
2526 namespace_unlock();
2527 }
2528 mntput(m);
2529 mntput(m);
2530 return err;
2531 }
2532
2533 /**
2534 * mnt_set_expiry - Put a mount on an expiration list
2535 * @mnt: The mount to list.
2536 * @expiry_list: The list to add the mount to.
2537 */
2538 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2539 {
2540 namespace_lock();
2541
2542 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2543
2544 namespace_unlock();
2545 }
2546 EXPORT_SYMBOL(mnt_set_expiry);
2547
2548 /*
2549 * process a list of expirable mountpoints with the intent of discarding any
2550 * mountpoints that aren't in use and haven't been touched since last we came
2551 * here
2552 */
2553 void mark_mounts_for_expiry(struct list_head *mounts)
2554 {
2555 struct mount *mnt, *next;
2556 LIST_HEAD(graveyard);
2557
2558 if (list_empty(mounts))
2559 return;
2560
2561 namespace_lock();
2562 lock_mount_hash();
2563
2564 /* extract from the expiration list every vfsmount that matches the
2565 * following criteria:
2566 * - only referenced by its parent vfsmount
2567 * - still marked for expiry (marked on the last call here; marks are
2568 * cleared by mntput())
2569 */
2570 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2571 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2572 propagate_mount_busy(mnt, 1))
2573 continue;
2574 list_move(&mnt->mnt_expire, &graveyard);
2575 }
2576 while (!list_empty(&graveyard)) {
2577 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2578 touch_mnt_namespace(mnt->mnt_ns);
2579 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2580 }
2581 unlock_mount_hash();
2582 namespace_unlock();
2583 }
2584
2585 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
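
/*
 * Illustration (added): the expected pairing is that an automounting
 * filesystem puts each submount on a private list with mnt_set_expiry()
 * and calls mark_mounts_for_expiry() periodically; a mount is reaped on
 * the second pass that finds it idle.  Everything named myfs_* below is
 * hypothetical glue.
 */
#if 0
static LIST_HEAD(myfs_automounts);

static struct vfsmount *myfs_d_automount(struct path *path)
{
	struct vfsmount *mnt = myfs_make_submount(path);	/* hypothetical */

	if (!IS_ERR(mnt))
		mnt_set_expiry(mnt, &myfs_automounts);
	return mnt;
}

/* run from a periodic work item, e.g. every few minutes */
static void myfs_expiry_worker(struct work_struct *work)
{
	mark_mounts_for_expiry(&myfs_automounts);
}
#endif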
2586
2587 /*
2588 * Ripoff of 'select_parent()'
2589 *
2590 * search the list of submounts for a given mountpoint, and move any
2591 * shrinkable submounts to the 'graveyard' list.
2592 */
2593 static int select_submounts(struct mount *parent, struct list_head *graveyard)
2594 {
2595 struct mount *this_parent = parent;
2596 struct list_head *next;
2597 int found = 0;
2598
2599 repeat:
2600 next = this_parent->mnt_mounts.next;
2601 resume:
2602 while (next != &this_parent->mnt_mounts) {
2603 struct list_head *tmp = next;
2604 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
2605
2606 next = tmp->next;
2607 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
2608 continue;
2609 /*
2610 * Descend a level if the mnt_mounts list is non-empty.
2611 */
2612 if (!list_empty(&mnt->mnt_mounts)) {
2613 this_parent = mnt;
2614 goto repeat;
2615 }
2616
2617 if (!propagate_mount_busy(mnt, 1)) {
2618 list_move_tail(&mnt->mnt_expire, graveyard);
2619 found++;
2620 }
2621 }
2622 /*
2623 * All done at this level ... ascend and resume the search
2624 */
2625 if (this_parent != parent) {
2626 next = this_parent->mnt_child.next;
2627 this_parent = this_parent->mnt_parent;
2628 goto resume;
2629 }
2630 return found;
2631 }
2632
2633 /*
2634 * process a list of expirable mountpoints with the intent of discarding any
2635 * submounts of a specific parent mountpoint
2636 *
2637 * mount_lock must be held for write
2638 */
2639 static void shrink_submounts(struct mount *mnt)
2640 {
2641 LIST_HEAD(graveyard);
2642 struct mount *m;
2643
2644 /* extract submounts of 'mountpoint' from the expiration list */
2645 while (select_submounts(mnt, &graveyard)) {
2646 while (!list_empty(&graveyard)) {
2647 m = list_first_entry(&graveyard, struct mount,
2648 mnt_expire);
2649 touch_mnt_namespace(m->mnt_ns);
2650 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2651 }
2652 }
2653 }
2654
2655 /*
2656 * Some copy_from_user() implementations do not return the exact number of
2657 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
2658 * Note that this function differs from copy_from_user() in that it will oops
2659 * on bad values of `to', rather than returning a short copy.
2660 */
2661 static long exact_copy_from_user(void *to, const void __user * from,
2662 unsigned long n)
2663 {
2664 char *t = to;
2665 const char __user *f = from;
2666 char c;
2667
2668 if (!access_ok(VERIFY_READ, from, n))
2669 return n;
2670
2671 while (n) {
2672 if (__get_user(c, f)) {
2673 memset(t, 0, n);
2674 break;
2675 }
2676 *t++ = c;
2677 f++;
2678 n--;
2679 }
2680 return n;
2681 }
2682
2683 void *copy_mount_options(const void __user * data)
2684 {
2685 int i;
2686 unsigned long size;
2687 char *copy;
2688
2689 if (!data)
2690 return NULL;
2691
2692 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
2693 if (!copy)
2694 return ERR_PTR(-ENOMEM);
2695
2696 /* We only care that *some* data at the address the user
2697 * gave us is valid. Just in case, we'll zero
2698 * the remainder of the page.
2699 */
2700 /* copy_from_user cannot cross TASK_SIZE ! */
2701 size = TASK_SIZE - (unsigned long)data;
2702 if (size > PAGE_SIZE)
2703 size = PAGE_SIZE;
2704
2705 i = size - exact_copy_from_user(copy, data, size);
2706 if (!i) {
2707 kfree(copy);
2708 return ERR_PTR(-EFAULT);
2709 }
2710 if (i != PAGE_SIZE)
2711 memset(copy + i, 0, PAGE_SIZE - i);
2712 return copy;
2713 }
2714
2715 char *copy_mount_string(const void __user *data)
2716 {
2717 return data ? strndup_user(data, PAGE_SIZE) : NULL;
2718 }
2719
2720 /*
2721 * Flags is a 32-bit value that allows up to 31 non-fs-dependent flags to
2722 * be given to the mount() call (i.e. read-only, no-dev, no-suid, etc.).
2723 *
2724 * data is a (void *) that can point to any structure up to
2725 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2726 * information (or be NULL).
2727 *
2728 * Pre-0.97 versions of mount() didn't have a flags word.
2729 * When the flags word was introduced its top half was required
2730 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2731 * Therefore, if this magic number is present, it carries no information
2732 * and must be discarded.
2733 */
2734 long do_mount(const char *dev_name, const char __user *dir_name,
2735 const char *type_page, unsigned long flags, void *data_page)
2736 {
2737 struct path path;
2738 unsigned int mnt_flags = 0, sb_flags;
2739 int retval = 0;
2740
2741 /* Discard magic */
2742 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2743 flags &= ~MS_MGC_MSK;
2744
2745 /* Basic sanity checks */
2746 if (data_page)
2747 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2748
2749 if (flags & MS_NOUSER)
2750 return -EINVAL;
2751
2752 /* ... and get the mountpoint */
2753 retval = user_path(dir_name, &path);
2754 if (retval)
2755 return retval;
2756
2757 retval = security_sb_mount(dev_name, &path,
2758 type_page, flags, data_page);
2759 if (!retval && !may_mount())
2760 retval = -EPERM;
2761 if (!retval && (flags & SB_MANDLOCK) && !may_mandlock())
2762 retval = -EPERM;
2763 if (retval)
2764 goto dput_out;
2765
2766 /* Default to relatime unless overridden */
2767 if (!(flags & MS_NOATIME))
2768 mnt_flags |= MNT_RELATIME;
2769
2770 /* Separate the per-mountpoint flags */
2771 if (flags & MS_NOSUID)
2772 mnt_flags |= MNT_NOSUID;
2773 if (flags & MS_NODEV)
2774 mnt_flags |= MNT_NODEV;
2775 if (flags & MS_NOEXEC)
2776 mnt_flags |= MNT_NOEXEC;
2777 if (flags & MS_NOATIME)
2778 mnt_flags |= MNT_NOATIME;
2779 if (flags & MS_NODIRATIME)
2780 mnt_flags |= MNT_NODIRATIME;
2781 if (flags & MS_STRICTATIME)
2782 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2783 if (flags & MS_RDONLY)
2784 mnt_flags |= MNT_READONLY;
2785
2786 /* The default atime for remount is preservation */
2787 if ((flags & MS_REMOUNT) &&
2788 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
2789 MS_STRICTATIME)) == 0)) {
2790 mnt_flags &= ~MNT_ATIME_MASK;
2791 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
2792 }
2793
2794 sb_flags = flags & (SB_RDONLY |
2795 SB_SYNCHRONOUS |
2796 SB_MANDLOCK |
2797 SB_DIRSYNC |
2798 SB_SILENT |
2799 SB_POSIXACL |
2800 SB_LAZYTIME |
2801 SB_I_VERSION);
2802
2803 if (flags & MS_REMOUNT)
2804 retval = do_remount(&path, flags, sb_flags, mnt_flags,
2805 data_page);
2806 else if (flags & MS_BIND)
2807 retval = do_loopback(&path, dev_name, flags & MS_REC);
2808 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2809 retval = do_change_type(&path, flags);
2810 else if (flags & MS_MOVE)
2811 retval = do_move_mount(&path, dev_name);
2812 else
2813 retval = do_new_mount(&path, type_page, sb_flags, mnt_flags,
2814 dev_name, data_page);
2815 dput_out:
2816 path_put(&path);
2817 return retval;
2818 }
2819
2820 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
2821 {
2822 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
2823 }
2824
2825 static void dec_mnt_namespaces(struct ucounts *ucounts)
2826 {
2827 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
2828 }
2829
2830 static void free_mnt_ns(struct mnt_namespace *ns)
2831 {
2832 ns_free_inum(&ns->ns);
2833 dec_mnt_namespaces(ns->ucounts);
2834 put_user_ns(ns->user_ns);
2835 kfree(ns);
2836 }
2837
2838 /*
2839 * Assign a sequence number so we can detect when we attempt to bind
2840 * mount a reference to an older mount namespace into the current
2841 * mount namespace, preventing reference counting loops. A 64bit
2842 * number incrementing even at 10GHz would take over 58 years to wrap;
2843 * namespaces are created far more slowly, so we can ignore the possibility.
2844 */
2845 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2846
2847 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2848 {
2849 struct mnt_namespace *new_ns;
2850 struct ucounts *ucounts;
2851 int ret;
2852
2853 ucounts = inc_mnt_namespaces(user_ns);
2854 if (!ucounts)
2855 return ERR_PTR(-ENOSPC);
2856
2857 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2858 if (!new_ns) {
2859 dec_mnt_namespaces(ucounts);
2860 return ERR_PTR(-ENOMEM);
2861 }
2862 ret = ns_alloc_inum(&new_ns->ns);
2863 if (ret) {
2864 kfree(new_ns);
2865 dec_mnt_namespaces(ucounts);
2866 return ERR_PTR(ret);
2867 }
2868 new_ns->ns.ops = &mntns_operations;
2869 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2870 atomic_set(&new_ns->count, 1);
2871 new_ns->root = NULL;
2872 INIT_LIST_HEAD(&new_ns->list);
2873 init_waitqueue_head(&new_ns->poll);
2874 new_ns->event = 0;
2875 new_ns->user_ns = get_user_ns(user_ns);
2876 new_ns->ucounts = ucounts;
2877 new_ns->mounts = 0;
2878 new_ns->pending_mounts = 0;
2879 return new_ns;
2880 }
2881
2882 __latent_entropy
2883 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2884 struct user_namespace *user_ns, struct fs_struct *new_fs)
2885 {
2886 struct mnt_namespace *new_ns;
2887 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2888 struct mount *p, *q;
2889 struct mount *old;
2890 struct mount *new;
2891 int copy_flags;
2892
2893 BUG_ON(!ns);
2894
2895 if (likely(!(flags & CLONE_NEWNS))) {
2896 get_mnt_ns(ns);
2897 return ns;
2898 }
2899
2900 old = ns->root;
2901
2902 new_ns = alloc_mnt_ns(user_ns);
2903 if (IS_ERR(new_ns))
2904 return new_ns;
2905
2906 namespace_lock();
2907 /* First pass: copy the tree topology */
2908 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2909 if (user_ns != ns->user_ns)
2910 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2911 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2912 if (IS_ERR(new)) {
2913 namespace_unlock();
2914 free_mnt_ns(new_ns);
2915 return ERR_CAST(new);
2916 }
2917 new_ns->root = new;
2918 list_add_tail(&new_ns->list, &new->mnt_list);
2919
2920 /*
2921 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2922 * as belonging to new namespace. We have already acquired a private
2923 * fs_struct, so tsk->fs->lock is not needed.
2924 */
2925 p = old;
2926 q = new;
2927 while (p) {
2928 q->mnt_ns = new_ns;
2929 new_ns->mounts++;
2930 if (new_fs) {
2931 if (&p->mnt == new_fs->root.mnt) {
2932 new_fs->root.mnt = mntget(&q->mnt);
2933 rootmnt = &p->mnt;
2934 }
2935 if (&p->mnt == new_fs->pwd.mnt) {
2936 new_fs->pwd.mnt = mntget(&q->mnt);
2937 pwdmnt = &p->mnt;
2938 }
2939 }
2940 p = next_mnt(p, old);
2941 q = next_mnt(q, new);
2942 if (!q)
2943 break;
2944 while (p->mnt.mnt_root != q->mnt.mnt_root)
2945 p = next_mnt(p, old);
2946 }
2947 namespace_unlock();
2948
2949 if (rootmnt)
2950 mntput(rootmnt);
2951 if (pwdmnt)
2952 mntput(pwdmnt);
2953
2954 return new_ns;
2955 }
2956
2957 /**
2958 * create_mnt_ns - creates a private namespace and adds a root filesystem
2959 * @m: pointer to the new root filesystem mount
2960 */
2961 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2962 {
2963 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2964 if (!IS_ERR(new_ns)) {
2965 struct mount *mnt = real_mount(m);
2966 mnt->mnt_ns = new_ns;
2967 new_ns->root = mnt;
2968 new_ns->mounts++;
2969 list_add(&mnt->mnt_list, &new_ns->list);
2970 } else {
2971 mntput(m);
2972 }
2973 return new_ns;
2974 }
2975
2976 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2977 {
2978 struct mnt_namespace *ns;
2979 struct super_block *s;
2980 struct path path;
2981 int err;
2982
2983 ns = create_mnt_ns(mnt);
2984 if (IS_ERR(ns))
2985 return ERR_CAST(ns);
2986
2987 err = vfs_path_lookup(mnt->mnt_root, mnt,
2988 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2989
2990 put_mnt_ns(ns);
2991
2992 if (err)
2993 return ERR_PTR(err);
2994
2995 /* trade a vfsmount reference for active sb one */
2996 s = path.mnt->mnt_sb;
2997 atomic_inc(&s->s_active);
2998 mntput(path.mnt);
2999 /* lock the sucker */
3000 down_write(&s->s_umount);
3001 /* ... and return the root of (sub)tree on it */
3002 return path.dentry;
3003 }
3004 EXPORT_SYMBOL(mount_subtree);
3005
3006 int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
3007 unsigned long flags, void __user *data)
3008 {
3009 int ret;
3010 char *kernel_type;
3011 char *kernel_dev;
3012 void *options;
3013
3014 kernel_type = copy_mount_string(type);
3015 ret = PTR_ERR(kernel_type);
3016 if (IS_ERR(kernel_type))
3017 goto out_type;
3018
3019 kernel_dev = copy_mount_string(dev_name);
3020 ret = PTR_ERR(kernel_dev);
3021 if (IS_ERR(kernel_dev))
3022 goto out_dev;
3023
3024 options = copy_mount_options(data);
3025 ret = PTR_ERR(options);
3026 if (IS_ERR(options))
3027 goto out_data;
3028
3029 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3030
3031 kfree(options);
3032 out_data:
3033 kfree(kernel_dev);
3034 out_dev:
3035 kfree(kernel_type);
3036 out_type:
3037 return ret;
3038 }
3039
3040 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3041 char __user *, type, unsigned long, flags, void __user *, data)
3042 {
3043 return ksys_mount(dev_name, dir_name, type, flags, data);
3044 }
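
/*
 * Illustration (added): what a complete userspace call into the syscall
 * above looks like; "size=16m" is ordinary fs-dependent data handed
 * through copy_mount_options().  The mountpoint and options are made up.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("tmpfs", "/mnt/scratch", "tmpfs",
		  MS_NOSUID | MS_NODEV, "size=16m") == -1)
		perror("mount");
	return 0;
}
#endif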
3045
3046 /*
3047 * Return true if path is reachable from root
3048 *
3049 * namespace_sem or mount_lock is held
3050 */
3051 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3052 const struct path *root)
3053 {
3054 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3055 dentry = mnt->mnt_mountpoint;
3056 mnt = mnt->mnt_parent;
3057 }
3058 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3059 }
3060
3061 bool path_is_under(const struct path *path1, const struct path *path2)
3062 {
3063 bool res;
3064 read_seqlock_excl(&mount_lock);
3065 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3066 read_sequnlock_excl(&mount_lock);
3067 return res;
3068 }
3069 EXPORT_SYMBOL(path_is_under);
3070
3071 /*
3072 * pivot_root Semantics:
3073 * Moves the root file system of the current process to the directory put_old,
3074 * makes new_root as the new root file system of the current process, and sets
3075 * root/cwd of all processes which had them on the current root to new_root.
3076 *
3077 * Restrictions:
3078 * The new_root and put_old must be directories, and must not be on the
3079 * same file system as the current process root. The put_old must be
3080 * underneath new_root, i.e. adding a non-zero number of /.. to the string
3081 * pointed to by put_old must yield the same directory as new_root. No other
3082 * file system may be mounted on put_old. After all, new_root is a mountpoint.
3083 *
3084 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
3085 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
3086 * in this situation.
3087 *
3088 * Notes:
3089 * - we don't move root/cwd if they are not at the root (reason: if something
3090 * cared enough to change them, it's probably wrong to force them elsewhere)
3091 * - it's okay to pick a root that isn't the root of a file system, e.g.
3092 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
3093 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
3094 * first.
3095 */
3096 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
3097 const char __user *, put_old)
3098 {
3099 struct path new, old, parent_path, root_parent, root;
3100 struct mount *new_mnt, *root_mnt, *old_mnt;
3101 struct mountpoint *old_mp, *root_mp;
3102 int error;
3103
3104 if (!may_mount())
3105 return -EPERM;
3106
3107 error = user_path_dir(new_root, &new);
3108 if (error)
3109 goto out0;
3110
3111 error = user_path_dir(put_old, &old);
3112 if (error)
3113 goto out1;
3114
3115 error = security_sb_pivotroot(&old, &new);
3116 if (error)
3117 goto out2;
3118
3119 get_fs_root(current->fs, &root);
3120 old_mp = lock_mount(&old);
3121 error = PTR_ERR(old_mp);
3122 if (IS_ERR(old_mp))
3123 goto out3;
3124
3125 error = -EINVAL;
3126 new_mnt = real_mount(new.mnt);
3127 root_mnt = real_mount(root.mnt);
3128 old_mnt = real_mount(old.mnt);
3129 if (IS_MNT_SHARED(old_mnt) ||
3130 IS_MNT_SHARED(new_mnt->mnt_parent) ||
3131 IS_MNT_SHARED(root_mnt->mnt_parent))
3132 goto out4;
3133 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
3134 goto out4;
3135 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
3136 goto out4;
3137 error = -ENOENT;
3138 if (d_unlinked(new.dentry))
3139 goto out4;
3140 error = -EBUSY;
3141 if (new_mnt == root_mnt || old_mnt == root_mnt)
3142 goto out4; /* loop, on the same file system */
3143 error = -EINVAL;
3144 if (root.mnt->mnt_root != root.dentry)
3145 goto out4; /* not a mountpoint */
3146 if (!mnt_has_parent(root_mnt))
3147 goto out4; /* not attached */
3148 root_mp = root_mnt->mnt_mp;
3149 if (new.mnt->mnt_root != new.dentry)
3150 goto out4; /* not a mountpoint */
3151 if (!mnt_has_parent(new_mnt))
3152 goto out4; /* not attached */
3153 /* make sure we can reach put_old from new_root */
3154 if (!is_path_reachable(old_mnt, old.dentry, &new))
3155 goto out4;
3156 /* make certain new is below the root */
3157 if (!is_path_reachable(new_mnt, new.dentry, &root))
3158 goto out4;
3159 root_mp->m_count++; /* pin it so it won't go away */
3160 lock_mount_hash();
3161 detach_mnt(new_mnt, &parent_path);
3162 detach_mnt(root_mnt, &root_parent);
3163 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
3164 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
3165 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
3166 }
3167 /* mount old root on put_old */
3168 attach_mnt(root_mnt, old_mnt, old_mp);
3169 /* mount new_root on / */
3170 attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
3171 touch_mnt_namespace(current->nsproxy->mnt_ns);
3172 /* A moved mount should not expire automatically */
3173 list_del_init(&new_mnt->mnt_expire);
3174 put_mountpoint(root_mp);
3175 unlock_mount_hash();
3176 chroot_fs_refs(&root, &new);
3177 error = 0;
3178 out4:
3179 unlock_mount(old_mp);
3180 if (!error) {
3181 path_put(&root_parent);
3182 path_put(&parent_path);
3183 }
3184 out3:
3185 path_put(&root);
3186 out2:
3187 path_put(&old);
3188 out1:
3189 path_put(&new);
3190 out0:
3191 return error;
3192 }
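
/*
 * Illustration (added): glibc historically ships no pivot_root()
 * wrapper, so callers go through syscall(2).  A hedged sketch of the
 * classic root-switch sequence; the paths are hypothetical and
 * /new_root must already be a mount point, per the restrictions above.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	if (chdir("/new_root") == -1)
		perror("chdir");
	if (syscall(SYS_pivot_root, "/new_root", "/new_root/old") == -1)
		perror("pivot_root");
	if (chdir("/") == -1)		/* re-anchor cwd at the new root */
		perror("chdir /");
	return 0;
}
#endif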
3193
3194 static void __init init_mount_tree(void)
3195 {
3196 struct vfsmount *mnt;
3197 struct mnt_namespace *ns;
3198 struct path root;
3199 struct file_system_type *type;
3200
3201 type = get_fs_type("rootfs");
3202 if (!type)
3203 panic("Can't find rootfs type");
3204 mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
3205 put_filesystem(type);
3206 if (IS_ERR(mnt))
3207 panic("Can't create rootfs");
3208
3209 ns = create_mnt_ns(mnt);
3210 if (IS_ERR(ns))
3211 panic("Can't allocate initial namespace");
3212
3213 init_task.nsproxy->mnt_ns = ns;
3214 get_mnt_ns(ns);
3215
3216 root.mnt = mnt;
3217 root.dentry = mnt->mnt_root;
3218 mnt->mnt_flags |= MNT_LOCKED;
3219
3220 set_fs_pwd(current->fs, &root);
3221 set_fs_root(current->fs, &root);
3222 }
3223
3224 void __init mnt_init(void)
3225 {
3226 int err;
3227
3228 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
3229 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3230
3231 mount_hashtable = alloc_large_system_hash("Mount-cache",
3232 sizeof(struct hlist_head),
3233 mhash_entries, 19,
3234 HASH_ZERO,
3235 &m_hash_shift, &m_hash_mask, 0, 0);
3236 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
3237 sizeof(struct hlist_head),
3238 mphash_entries, 19,
3239 HASH_ZERO,
3240 &mp_hash_shift, &mp_hash_mask, 0, 0);
3241
3242 if (!mount_hashtable || !mountpoint_hashtable)
3243 panic("Failed to allocate mount hash table\n");
3244
3245 kernfs_init();
3246
3247 err = sysfs_init();
3248 if (err)
3249 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
3250 __func__, err);
3251 fs_kobj = kobject_create_and_add("fs", NULL);
3252 if (!fs_kobj)
3253 printk(KERN_WARNING "%s: kobj create error\n", __func__);
3254 init_rootfs();
3255 init_mount_tree();
3256 }
3257
3258 void put_mnt_ns(struct mnt_namespace *ns)
3259 {
3260 if (!atomic_dec_and_test(&ns->count))
3261 return;
3262 drop_collected_mounts(&ns->root->mnt);
3263 free_mnt_ns(ns);
3264 }
3265
3266 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
3267 {
3268 struct vfsmount *mnt;
3269 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, data);
3270 if (!IS_ERR(mnt)) {
3271 /*
3272 * it is a long-term mount; don't release mnt until
3273 * we unmount it, just before the filesystem is unregistered
3274 */
3275 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
3276 }
3277 return mnt;
3278 }
3279 EXPORT_SYMBOL_GPL(kern_mount_data);
3280
3281 void kern_unmount(struct vfsmount *mnt)
3282 {
3283 /* release long term mount so mount point can be released */
3284 if (!IS_ERR_OR_NULL(mnt)) {
3285 real_mount(mnt)->mnt_ns = NULL;
3286 synchronize_rcu(); /* yecchhh... */
3287 mntput(mnt);
3288 }
3289 }
3290 EXPORT_SYMBOL(kern_unmount);
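
/*
 * Illustration (added): the intended kern_mount_data()/kern_unmount()
 * pairing for a pseudo filesystem that keeps one internal, long-term
 * mount for the life of the module.  myfs_fs_type is hypothetical.
 */
#if 0
static struct vfsmount *myfs_mnt;

static int __init myfs_init(void)
{
	myfs_mnt = kern_mount_data(&myfs_fs_type, NULL);
	return PTR_ERR_OR_ZERO(myfs_mnt);
}

static void __exit myfs_exit(void)
{
	kern_unmount(myfs_mnt);
}
#endif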
3291
3292 bool our_mnt(struct vfsmount *mnt)
3293 {
3294 return check_mnt(real_mount(mnt));
3295 }
3296
3297 bool current_chrooted(void)
3298 {
3299 /* Does the current process have a non-standard root? */
3300 struct path ns_root;
3301 struct path fs_root;
3302 bool chrooted;
3303
3304 /* Find the namespace root */
3305 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
3306 ns_root.dentry = ns_root.mnt->mnt_root;
3307 path_get(&ns_root);
3308 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
3309 ;
3310
3311 get_fs_root(current->fs, &fs_root);
3312
3313 chrooted = !path_equal(&fs_root, &ns_root);
3314
3315 path_put(&fs_root);
3316 path_put(&ns_root);
3317
3318 return chrooted;
3319 }
3320
3321 static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
3322 int *new_mnt_flags)
3323 {
3324 int new_flags = *new_mnt_flags;
3325 struct mount *mnt;
3326 bool visible = false;
3327
3328 down_read(&namespace_sem);
3329 list_for_each_entry(mnt, &ns->list, mnt_list) {
3330 struct mount *child;
3331 int mnt_flags;
3332
3333 if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type)
3334 continue;
3335
3336 /* This mount is not fully visible if its root directory
3337 * is not the root directory of the filesystem.
3338 */
3339 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
3340 continue;
3341
3342 /* A local view of the mount flags */
3343 mnt_flags = mnt->mnt.mnt_flags;
3344
3345 /* Don't miss readonly hidden in the superblock flags */
3346 if (sb_rdonly(mnt->mnt.mnt_sb))
3347 mnt_flags |= MNT_LOCK_READONLY;
3348
3349 /* Verify the mount flags are equal to or more permissive
3350 * than the proposed new mount.
3351 */
3352 if ((mnt_flags & MNT_LOCK_READONLY) &&
3353 !(new_flags & MNT_READONLY))
3354 continue;
3355 if ((mnt_flags & MNT_LOCK_ATIME) &&
3356 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
3357 continue;
3358
3359 /* This mount is not fully visible if there are any
3360 * locked child mounts that cover anything except for
3361 * empty directories.
3362 */
3363 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3364 struct inode *inode = child->mnt_mountpoint->d_inode;
3365 /* Only worry about locked mounts */
3366 if (!(child->mnt.mnt_flags & MNT_LOCKED))
3367 continue;
3368 /* Is the directory permanently empty? */
3369 if (!is_empty_dir_inode(inode))
3370 goto next;
3371 }
3372 /* Preserve the locked attributes */
3373 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
3374 MNT_LOCK_ATIME);
3375 visible = true;
3376 goto found;
3377 next: ;
3378 }
3379 found:
3380 up_read(&namespace_sem);
3381 return visible;
3382 }
3383
3384 static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
3385 {
3386 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
3387 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
3388 unsigned long s_iflags;
3389
3390 if (ns->user_ns == &init_user_ns)
3391 return false;
3392
3393 /* Can this filesystem be too revealing? */
3394 s_iflags = mnt->mnt_sb->s_iflags;
3395 if (!(s_iflags & SB_I_USERNS_VISIBLE))
3396 return false;
3397
3398 if ((s_iflags & required_iflags) != required_iflags) {
3399 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
3400 required_iflags);
3401 return true;
3402 }
3403
3404 return !mnt_already_visible(ns, mnt, new_mnt_flags);
3405 }
3406
3407 bool mnt_may_suid(struct vfsmount *mnt)
3408 {
3409 /*
3410 * Foreign mounts (accessed via fchdir or through /proc
3411 * symlinks) are always treated as if they are nosuid. This
3412 * prevents namespaces from trusting potentially unsafe
3413 * suid/sgid bits, file caps, or security labels that originate
3414 * in other namespaces.
3415 */
3416 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
3417 current_in_userns(mnt->mnt_sb->s_user_ns);
3418 }
3419
3420 static struct ns_common *mntns_get(struct task_struct *task)
3421 {
3422 struct ns_common *ns = NULL;
3423 struct nsproxy *nsproxy;
3424
3425 task_lock(task);
3426 nsproxy = task->nsproxy;
3427 if (nsproxy) {
3428 ns = &nsproxy->mnt_ns->ns;
3429 get_mnt_ns(to_mnt_ns(ns));
3430 }
3431 task_unlock(task);
3432
3433 return ns;
3434 }
3435
3436 static void mntns_put(struct ns_common *ns)
3437 {
3438 put_mnt_ns(to_mnt_ns(ns));
3439 }
3440
3441 static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
3442 {
3443 struct fs_struct *fs = current->fs;
3444 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
3445 struct path root;
3446 int err;
3447
3448 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
3449 !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
3450 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
3451 return -EPERM;
3452
3453 if (fs->users != 1)
3454 return -EINVAL;
3455
3456 get_mnt_ns(mnt_ns);
3457 old_mnt_ns = nsproxy->mnt_ns;
3458 nsproxy->mnt_ns = mnt_ns;
3459
3460 /* Find the root */
3461 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
3462 "/", LOOKUP_DOWN, &root);
3463 if (err) {
3464 /* revert to old namespace */
3465 nsproxy->mnt_ns = old_mnt_ns;
3466 put_mnt_ns(mnt_ns);
3467 return err;
3468 }
3469
3470 put_mnt_ns(old_mnt_ns);
3471
3472 /* Update the pwd and root */
3473 set_fs_pwd(fs, &root);
3474 set_fs_root(fs, &root);
3475
3476 path_put(&root);
3477 return 0;
3478 }
3479
3480 static struct user_namespace *mntns_owner(struct ns_common *ns)
3481 {
3482 return to_mnt_ns(ns)->user_ns;
3483 }
3484
3485 const struct proc_ns_operations mntns_operations = {
3486 .name = "mnt",
3487 .type = CLONE_NEWNS,
3488 .get = mntns_get,
3489 .put = mntns_put,
3490 .install = mntns_install,
3491 .owner = mntns_owner,
3492 };
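
/*
 * Illustration (added): mntns_install() is reached from setns(2).  A
 * hedged userspace sketch of entering another task's mount namespace;
 * the pid is hypothetical, and the caller needs CAP_SYS_ADMIN and
 * CAP_SYS_CHROOT as enforced above.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/1234/ns/mnt", O_RDONLY);	/* pid is made up */

	if (fd == -1) {
		perror("open");
		return 1;
	}
	if (setns(fd, CLONE_NEWNS) == -1)
		perror("setns");
	close(fd);
	return 0;
}
#endif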