1 /*
2 * linux/fs/namespace.c
3 *
4 * (C) Copyright Al Viro 2000, 2001
5 * Released under GPL v2.
6 *
7 * Based on code from fs/super.c, copyright Linus Torvalds and others.
8 * Heavily rewritten.
9 */
10
11 #include <linux/syscalls.h>
12 #include <linux/export.h>
13 #include <linux/capability.h>
14 #include <linux/mnt_namespace.h>
15 #include <linux/user_namespace.h>
16 #include <linux/namei.h>
17 #include <linux/security.h>
18 #include <linux/idr.h>
19 #include <linux/init.h> /* init_rootfs */
20 #include <linux/fs_struct.h> /* get_fs_root et.al. */
21 #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
22 #include <linux/uaccess.h>
23 #include <linux/proc_ns.h>
24 #include <linux/magic.h>
25 #include <linux/bootmem.h>
26 #include <linux/task_work.h>
27 #include "pnode.h"
28 #include "internal.h"
29
30 static unsigned int m_hash_mask __read_mostly;
31 static unsigned int m_hash_shift __read_mostly;
32 static unsigned int mp_hash_mask __read_mostly;
33 static unsigned int mp_hash_shift __read_mostly;
34
35 static __initdata unsigned long mhash_entries;
36 static int __init set_mhash_entries(char *str)
37 {
38 if (!str)
39 return 0;
40 mhash_entries = simple_strtoul(str, &str, 0);
41 return 1;
42 }
43 __setup("mhash_entries=", set_mhash_entries);
44
45 static __initdata unsigned long mphash_entries;
46 static int __init set_mphash_entries(char *str)
47 {
48 if (!str)
49 return 0;
50 mphash_entries = simple_strtoul(str, &str, 0);
51 return 1;
52 }
53 __setup("mphash_entries=", set_mphash_entries);
54
55 static u64 event;
56 static DEFINE_IDA(mnt_id_ida);
57 static DEFINE_IDA(mnt_group_ida);
58 static DEFINE_SPINLOCK(mnt_id_lock);
59 static int mnt_id_start = 0;
60 static int mnt_group_start = 1;
61
62 static struct hlist_head *mount_hashtable __read_mostly;
63 static struct hlist_head *mountpoint_hashtable __read_mostly;
64 static struct kmem_cache *mnt_cache __read_mostly;
65 static DECLARE_RWSEM(namespace_sem);
66
67 /* /sys/fs */
68 struct kobject *fs_kobj;
69 EXPORT_SYMBOL_GPL(fs_kobj);
70
71 /*
72 * vfsmount lock may be taken for read to prevent changes to the
73  * vfsmount hash, i.e. during mountpoint lookups or walking back
74 * up the tree.
75 *
76 * It should be taken for write in all cases where the vfsmount
77 * tree or hash is modified or when a vfsmount structure is modified.
78 */
79 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
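/*
 * Reader-side sketch of the seqlock usage described above (assumed
 * typical pattern; lookup_mnt() below is a real instance):
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&mount_lock);
 *		... walk the vfsmount hash locklessly ...
 *	} while (read_seqretry(&mount_lock, seq));
 */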
80
81 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
82 {
83 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
84 tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
85 tmp = tmp + (tmp >> m_hash_shift);
86 return &mount_hashtable[tmp & m_hash_mask];
87 }
88
89 static inline struct hlist_head *mp_hash(struct dentry *dentry)
90 {
91 unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
92 tmp = tmp + (tmp >> mp_hash_shift);
93 return &mountpoint_hashtable[tmp & mp_hash_mask];
94 }
95
96 /*
97 * allocation is serialized by namespace_sem, but we need the spinlock to
98 * serialize with freeing.
99 */
100 static int mnt_alloc_id(struct mount *mnt)
101 {
102 int res;
103
104 retry:
105 ida_pre_get(&mnt_id_ida, GFP_KERNEL);
106 spin_lock(&mnt_id_lock);
107 res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
108 if (!res)
109 mnt_id_start = mnt->mnt_id + 1;
110 spin_unlock(&mnt_id_lock);
111 if (res == -EAGAIN)
112 goto retry;
113
114 return res;
115 }
116
117 static void mnt_free_id(struct mount *mnt)
118 {
119 int id = mnt->mnt_id;
120 spin_lock(&mnt_id_lock);
121 ida_remove(&mnt_id_ida, id);
122 if (mnt_id_start > id)
123 mnt_id_start = id;
124 spin_unlock(&mnt_id_lock);
125 }
126
127 /*
128 * Allocate a new peer group ID
129 *
130 * mnt_group_ida is protected by namespace_sem
131 */
132 static int mnt_alloc_group_id(struct mount *mnt)
133 {
134 int res;
135
136 if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
137 return -ENOMEM;
138
139 res = ida_get_new_above(&mnt_group_ida,
140 mnt_group_start,
141 &mnt->mnt_group_id);
142 if (!res)
143 mnt_group_start = mnt->mnt_group_id + 1;
144
145 return res;
146 }
147
148 /*
149 * Release a peer group ID
150 */
151 void mnt_release_group_id(struct mount *mnt)
152 {
153 int id = mnt->mnt_group_id;
154 ida_remove(&mnt_group_ida, id);
155 if (mnt_group_start > id)
156 mnt_group_start = id;
157 mnt->mnt_group_id = 0;
158 }
159
160 /*
161 * vfsmount lock must be held for read
162 */
163 static inline void mnt_add_count(struct mount *mnt, int n)
164 {
165 #ifdef CONFIG_SMP
166 this_cpu_add(mnt->mnt_pcp->mnt_count, n);
167 #else
168 preempt_disable();
169 mnt->mnt_count += n;
170 preempt_enable();
171 #endif
172 }
173
174 /*
175 * vfsmount lock must be held for write
176 */
177 unsigned int mnt_get_count(struct mount *mnt)
178 {
179 #ifdef CONFIG_SMP
180 unsigned int count = 0;
181 int cpu;
182
183 for_each_possible_cpu(cpu) {
184 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
185 }
186
187 return count;
188 #else
189 return mnt->mnt_count;
190 #endif
191 }
192
193 static struct mount *alloc_vfsmnt(const char *name)
194 {
195 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
196 if (mnt) {
197 int err;
198
199 err = mnt_alloc_id(mnt);
200 if (err)
201 goto out_free_cache;
202
203 if (name) {
204 mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
205 if (!mnt->mnt_devname)
206 goto out_free_id;
207 }
208
209 #ifdef CONFIG_SMP
210 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
211 if (!mnt->mnt_pcp)
212 goto out_free_devname;
213
214 this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
215 #else
216 mnt->mnt_count = 1;
217 mnt->mnt_writers = 0;
218 #endif
219
220 INIT_HLIST_NODE(&mnt->mnt_hash);
221 INIT_LIST_HEAD(&mnt->mnt_child);
222 INIT_LIST_HEAD(&mnt->mnt_mounts);
223 INIT_LIST_HEAD(&mnt->mnt_list);
224 INIT_LIST_HEAD(&mnt->mnt_expire);
225 INIT_LIST_HEAD(&mnt->mnt_share);
226 INIT_LIST_HEAD(&mnt->mnt_slave_list);
227 INIT_LIST_HEAD(&mnt->mnt_slave);
228 INIT_HLIST_NODE(&mnt->mnt_mp_list);
229 #ifdef CONFIG_FSNOTIFY
230 INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
231 #endif
232 }
233 return mnt;
234
235 #ifdef CONFIG_SMP
236 out_free_devname:
237 kfree(mnt->mnt_devname);
238 #endif
239 out_free_id:
240 mnt_free_id(mnt);
241 out_free_cache:
242 kmem_cache_free(mnt_cache, mnt);
243 return NULL;
244 }
245
246 /*
247 * Most r/o checks on a fs are for operations that take
248 * discrete amounts of time, like a write() or unlink().
249 * We must keep track of when those operations start
250 * (for permission checks) and when they end, so that
251 * we can determine when writes are able to occur to
252 * a filesystem.
253 */
254 /*
255 * __mnt_is_readonly: check whether a mount is read-only
256 * @mnt: the mount to check for its write status
257 *
258  * This shouldn't be used directly outside of the VFS.
259 * It does not guarantee that the filesystem will stay
260 * r/w, just that it is right *now*. This can not and
261 * should not be used in place of IS_RDONLY(inode).
262 * mnt_want/drop_write() will _keep_ the filesystem
263 * r/w.
264 */
265 int __mnt_is_readonly(struct vfsmount *mnt)
266 {
267 if (mnt->mnt_flags & MNT_READONLY)
268 return 1;
269 if (mnt->mnt_sb->s_flags & MS_RDONLY)
270 return 1;
271 return 0;
272 }
273 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
274
275 static inline void mnt_inc_writers(struct mount *mnt)
276 {
277 #ifdef CONFIG_SMP
278 this_cpu_inc(mnt->mnt_pcp->mnt_writers);
279 #else
280 mnt->mnt_writers++;
281 #endif
282 }
283
284 static inline void mnt_dec_writers(struct mount *mnt)
285 {
286 #ifdef CONFIG_SMP
287 this_cpu_dec(mnt->mnt_pcp->mnt_writers);
288 #else
289 mnt->mnt_writers--;
290 #endif
291 }
292
293 static unsigned int mnt_get_writers(struct mount *mnt)
294 {
295 #ifdef CONFIG_SMP
296 unsigned int count = 0;
297 int cpu;
298
299 for_each_possible_cpu(cpu) {
300 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
301 }
302
303 return count;
304 #else
305 return mnt->mnt_writers;
306 #endif
307 }
308
309 static int mnt_is_readonly(struct vfsmount *mnt)
310 {
311 if (mnt->mnt_sb->s_readonly_remount)
312 return 1;
313 /* Order wrt setting s_flags/s_readonly_remount in do_remount() */
314 smp_rmb();
315 return __mnt_is_readonly(mnt);
316 }
317
318 /*
319 * Most r/o & frozen checks on a fs are for operations that take discrete
320 * amounts of time, like a write() or unlink(). We must keep track of when
321 * those operations start (for permission checks) and when they end, so that we
322 * can determine when writes are able to occur to a filesystem.
323 */
324 /**
325 * __mnt_want_write - get write access to a mount without freeze protection
326 * @m: the mount on which to take a write
327 *
328 * This tells the low-level filesystem that a write is about to be performed to
329  * it, and makes sure that writes are allowed (the mount is read-write) before
330 * returning success. This operation does not protect against filesystem being
331 * frozen. When the write operation is finished, __mnt_drop_write() must be
332 * called. This is effectively a refcount.
333 */
334 int __mnt_want_write(struct vfsmount *m)
335 {
336 struct mount *mnt = real_mount(m);
337 int ret = 0;
338
339 preempt_disable();
340 mnt_inc_writers(mnt);
341 /*
342  * The store from mnt_inc_writers() must be visible before we pass
343  * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
344 * incremented count after it has set MNT_WRITE_HOLD.
345 */
346 smp_mb();
347 while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
348 cpu_relax();
349 /*
350 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
351 * be set to match its requirements. So we must not load that until
352 * MNT_WRITE_HOLD is cleared.
353 */
354 smp_rmb();
355 if (mnt_is_readonly(m)) {
356 mnt_dec_writers(mnt);
357 ret = -EROFS;
358 }
359 preempt_enable();
360
361 return ret;
362 }
363
364 /**
365 * mnt_want_write - get write access to a mount
366 * @m: the mount on which to take a write
367 *
368 * This tells the low-level filesystem that a write is about to be performed to
369 * it, and makes sure that writes are allowed (mount is read-write, filesystem
370 * is not frozen) before returning success. When the write operation is
371 * finished, mnt_drop_write() must be called. This is effectively a refcount.
372 */
373 int mnt_want_write(struct vfsmount *m)
374 {
375 int ret;
376
377 sb_start_write(m->mnt_sb);
378 ret = __mnt_want_write(m);
379 if (ret)
380 sb_end_write(m->mnt_sb);
381 return ret;
382 }
383 EXPORT_SYMBOL_GPL(mnt_want_write);
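/*
 * Caller-side sketch (a typical pairing, assuming a struct path *path
 * supplied by the caller):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... perform the modification ...
 *	mnt_drop_write(path->mnt);
 */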
384
385 /**
386 * mnt_clone_write - get write access to a mount
387 * @mnt: the mount on which to take a write
388 *
389 * This is effectively like mnt_want_write, except
390 * it must only be used to take an extra write reference
391 * on a mountpoint that we already know has a write reference
392 * on it. This allows some optimisation.
393 *
394 * After finished, mnt_drop_write must be called as usual to
395 * drop the reference.
396 */
397 int mnt_clone_write(struct vfsmount *mnt)
398 {
399 /* superblock may be r/o */
400 if (__mnt_is_readonly(mnt))
401 return -EROFS;
402 preempt_disable();
403 mnt_inc_writers(real_mount(mnt));
404 preempt_enable();
405 return 0;
406 }
407 EXPORT_SYMBOL_GPL(mnt_clone_write);
408
409 /**
410 * __mnt_want_write_file - get write access to a file's mount
411  * @file: the file whose mount should be write-locked
412 *
413 * This is like __mnt_want_write, but it takes a file and can
414 * do some optimisations if the file is open for write already
415 */
416 int __mnt_want_write_file(struct file *file)
417 {
418 if (!(file->f_mode & FMODE_WRITER))
419 return __mnt_want_write(file->f_path.mnt);
420 else
421 return mnt_clone_write(file->f_path.mnt);
422 }
423
424 /**
425 * mnt_want_write_file - get write access to a file's mount
426  * @file: the file whose mount should be write-locked
427 *
428 * This is like mnt_want_write, but it takes a file and can
429 * do some optimisations if the file is open for write already
430 */
431 int mnt_want_write_file(struct file *file)
432 {
433 int ret;
434
435 sb_start_write(file->f_path.mnt->mnt_sb);
436 ret = __mnt_want_write_file(file);
437 if (ret)
438 sb_end_write(file->f_path.mnt->mnt_sb);
439 return ret;
440 }
441 EXPORT_SYMBOL_GPL(mnt_want_write_file);
442
443 /**
444 * __mnt_drop_write - give up write access to a mount
445 * @mnt: the mount on which to give up write access
446 *
447 * Tells the low-level filesystem that we are done
448 * performing writes to it. Must be matched with
449 * __mnt_want_write() call above.
450 */
451 void __mnt_drop_write(struct vfsmount *mnt)
452 {
453 preempt_disable();
454 mnt_dec_writers(real_mount(mnt));
455 preempt_enable();
456 }
457
458 /**
459 * mnt_drop_write - give up write access to a mount
460 * @mnt: the mount on which to give up write access
461 *
462 * Tells the low-level filesystem that we are done performing writes to it and
463 * also allows filesystem to be frozen again. Must be matched with
464 * mnt_want_write() call above.
465 */
466 void mnt_drop_write(struct vfsmount *mnt)
467 {
468 __mnt_drop_write(mnt);
469 sb_end_write(mnt->mnt_sb);
470 }
471 EXPORT_SYMBOL_GPL(mnt_drop_write);
472
473 void __mnt_drop_write_file(struct file *file)
474 {
475 __mnt_drop_write(file->f_path.mnt);
476 }
477
478 void mnt_drop_write_file(struct file *file)
479 {
480 mnt_drop_write(file->f_path.mnt);
481 }
482 EXPORT_SYMBOL(mnt_drop_write_file);
483
484 static int mnt_make_readonly(struct mount *mnt)
485 {
486 int ret = 0;
487
488 lock_mount_hash();
489 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
490 /*
491 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
492 * should be visible before we do.
493 */
494 smp_mb();
495
496 /*
497 * With writers on hold, if this value is zero, then there are
498 * definitely no active writers (although held writers may subsequently
499 * increment the count, they'll have to wait, and decrement it after
500 * seeing MNT_READONLY).
501 *
502 * It is OK to have counter incremented on one CPU and decremented on
503 * another: the sum will add up correctly. The danger would be when we
504 * sum up each counter, if we read a counter before it is incremented,
505 * but then read another CPU's count which it has been subsequently
506 * decremented from -- we would see more decrements than we should.
507 * MNT_WRITE_HOLD protects against this scenario, because
508 * mnt_want_write first increments count, then smp_mb, then spins on
509 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
510 * we're counting up here.
511 */
512 if (mnt_get_writers(mnt) > 0)
513 ret = -EBUSY;
514 else
515 mnt->mnt.mnt_flags |= MNT_READONLY;
516 /*
517 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
518 * that become unheld will see MNT_READONLY.
519 */
520 smp_wmb();
521 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
522 unlock_mount_hash();
523 return ret;
524 }
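/*
 * Timeline sketch of the protocol described above (illustrative only):
 *
 *	CPU0 (mnt_make_readonly)	CPU1 (__mnt_want_write)
 *	set MNT_WRITE_HOLD, smp_mb()
 *					mnt_inc_writers(), smp_mb()
 *					spin while MNT_WRITE_HOLD is set
 *	sum per-CPU writer counts:
 *	nonzero, so fail with -EBUSY
 *	clear MNT_WRITE_HOLD
 *					no MNT_READONLY seen, write proceeds
 *
 * Had CPU0 summed the counters before CPU1's increment, it would have
 * set MNT_READONLY instead and CPU1 would back off with -EROFS.
 */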
525
526 static void __mnt_unmake_readonly(struct mount *mnt)
527 {
528 lock_mount_hash();
529 mnt->mnt.mnt_flags &= ~MNT_READONLY;
530 unlock_mount_hash();
531 }
532
533 int sb_prepare_remount_readonly(struct super_block *sb)
534 {
535 struct mount *mnt;
536 int err = 0;
537
538 /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
539 if (atomic_long_read(&sb->s_remove_count))
540 return -EBUSY;
541
542 lock_mount_hash();
543 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
544 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
545 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
546 smp_mb();
547 if (mnt_get_writers(mnt) > 0) {
548 err = -EBUSY;
549 break;
550 }
551 }
552 }
553 if (!err && atomic_long_read(&sb->s_remove_count))
554 err = -EBUSY;
555
556 if (!err) {
557 sb->s_readonly_remount = 1;
558 smp_wmb();
559 }
560 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
561 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
562 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
563 }
564 unlock_mount_hash();
565
566 return err;
567 }
568
569 static void free_vfsmnt(struct mount *mnt)
570 {
571 kfree(mnt->mnt_devname);
572 #ifdef CONFIG_SMP
573 free_percpu(mnt->mnt_pcp);
574 #endif
575 kmem_cache_free(mnt_cache, mnt);
576 }
577
578 static void delayed_free_vfsmnt(struct rcu_head *head)
579 {
580 free_vfsmnt(container_of(head, struct mount, mnt_rcu));
581 }
582
583 /* call under rcu_read_lock */
584 bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
585 {
586 struct mount *mnt;
587 if (read_seqretry(&mount_lock, seq))
588 return false;
589 if (bastard == NULL)
590 return true;
591 mnt = real_mount(bastard);
592 mnt_add_count(mnt, 1);
593 if (likely(!read_seqretry(&mount_lock, seq)))
594 return true;
595 if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
596 mnt_add_count(mnt, -1);
597 return false;
598 }
599 rcu_read_unlock();
600 mntput(bastard);
601 rcu_read_lock();
602 return false;
603 }
604
605 /*
606 * find the first mount at @dentry on vfsmount @mnt.
607 * call under rcu_read_lock()
608 */
609 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
610 {
611 struct hlist_head *head = m_hash(mnt, dentry);
612 struct mount *p;
613
614 hlist_for_each_entry_rcu(p, head, mnt_hash)
615 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
616 return p;
617 return NULL;
618 }
619
620 /*
621 * find the last mount at @dentry on vfsmount @mnt.
622 * mount_lock must be held.
623 */
624 struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
625 {
626 struct mount *p, *res;
627 res = p = __lookup_mnt(mnt, dentry);
628 if (!p)
629 goto out;
630 hlist_for_each_entry_continue(p, mnt_hash) {
631 if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
632 break;
633 res = p;
634 }
635 out:
636 return res;
637 }
638
639 /*
640 * lookup_mnt - Return the first child mount mounted at path
641 *
642 * "First" means first mounted chronologically. If you create the
643 * following mounts:
644 *
645 * mount /dev/sda1 /mnt
646 * mount /dev/sda2 /mnt
647 * mount /dev/sda3 /mnt
648 *
649 * Then lookup_mnt() on the base /mnt dentry in the root mount will
650 * return successively the root dentry and vfsmount of /dev/sda1, then
651 * /dev/sda2, then /dev/sda3, then NULL.
652 *
653 * lookup_mnt takes a reference to the found vfsmount.
654 */
655 struct vfsmount *lookup_mnt(struct path *path)
656 {
657 struct mount *child_mnt;
658 struct vfsmount *m;
659 unsigned seq;
660
661 rcu_read_lock();
662 do {
663 seq = read_seqbegin(&mount_lock);
664 child_mnt = __lookup_mnt(path->mnt, path->dentry);
665 m = child_mnt ? &child_mnt->mnt : NULL;
666 } while (!legitimize_mnt(m, seq));
667 rcu_read_unlock();
668 return m;
669 }
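/*
 * Usage sketch (hypothetical caller): the reference taken here must be
 * balanced with mntput():
 *
 *	struct vfsmount *m = lookup_mnt(&path);
 *	if (m) {
 *		... inspect the child mount ...
 *		mntput(m);
 *	}
 */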
670
671 /*
672 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
673 * current mount namespace.
674 *
675 * The common case is dentries are not mountpoints at all and that
676 * test is handled inline. For the slow case when we are actually
677 * dealing with a mountpoint of some kind, walk through all of the
678 * mounts in the current mount namespace and test to see if the dentry
679 * is a mountpoint.
680 *
681  * The mount_hashtable is not usable in this context because we
682 * need to identify all mounts that may be in the current mount
683  * namespace, not just a mount that happens to have some specified
684 * parent mount.
685 */
686 bool __is_local_mountpoint(struct dentry *dentry)
687 {
688 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
689 struct mount *mnt;
690 bool is_covered = false;
691
692 if (!d_mountpoint(dentry))
693 goto out;
694
695 down_read(&namespace_sem);
696 list_for_each_entry(mnt, &ns->list, mnt_list) {
697 is_covered = (mnt->mnt_mountpoint == dentry);
698 if (is_covered)
699 break;
700 }
701 up_read(&namespace_sem);
702 out:
703 return is_covered;
704 }
705
706 static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
707 {
708 struct hlist_head *chain = mp_hash(dentry);
709 struct mountpoint *mp;
710
711 hlist_for_each_entry(mp, chain, m_hash) {
712 if (mp->m_dentry == dentry) {
713 /* might be worth a WARN_ON() */
714 if (d_unlinked(dentry))
715 return ERR_PTR(-ENOENT);
716 mp->m_count++;
717 return mp;
718 }
719 }
720 return NULL;
721 }
722
723 static struct mountpoint *new_mountpoint(struct dentry *dentry)
724 {
725 struct hlist_head *chain = mp_hash(dentry);
726 struct mountpoint *mp;
727 int ret;
728
729 mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
730 if (!mp)
731 return ERR_PTR(-ENOMEM);
732
733 ret = d_set_mounted(dentry);
734 if (ret) {
735 kfree(mp);
736 return ERR_PTR(ret);
737 }
738
739 mp->m_dentry = dentry;
740 mp->m_count = 1;
741 hlist_add_head(&mp->m_hash, chain);
742 INIT_HLIST_HEAD(&mp->m_list);
743 return mp;
744 }
745
746 static void put_mountpoint(struct mountpoint *mp)
747 {
748 if (!--mp->m_count) {
749 struct dentry *dentry = mp->m_dentry;
750 BUG_ON(!hlist_empty(&mp->m_list));
751 spin_lock(&dentry->d_lock);
752 dentry->d_flags &= ~DCACHE_MOUNTED;
753 spin_unlock(&dentry->d_lock);
754 hlist_del(&mp->m_hash);
755 kfree(mp);
756 }
757 }
758
759 static inline int check_mnt(struct mount *mnt)
760 {
761 return mnt->mnt_ns == current->nsproxy->mnt_ns;
762 }
763
764 /*
765 * vfsmount lock must be held for write
766 */
767 static void touch_mnt_namespace(struct mnt_namespace *ns)
768 {
769 if (ns) {
770 ns->event = ++event;
771 wake_up_interruptible(&ns->poll);
772 }
773 }
774
775 /*
776 * vfsmount lock must be held for write
777 */
778 static void __touch_mnt_namespace(struct mnt_namespace *ns)
779 {
780 if (ns && ns->event != event) {
781 ns->event = event;
782 wake_up_interruptible(&ns->poll);
783 }
784 }
785
786 /*
787 * vfsmount lock must be held for write
788 */
789 static void detach_mnt(struct mount *mnt, struct path *old_path)
790 {
791 old_path->dentry = mnt->mnt_mountpoint;
792 old_path->mnt = &mnt->mnt_parent->mnt;
793 mnt->mnt_parent = mnt;
794 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
795 list_del_init(&mnt->mnt_child);
796 hlist_del_init_rcu(&mnt->mnt_hash);
797 hlist_del_init(&mnt->mnt_mp_list);
798 put_mountpoint(mnt->mnt_mp);
799 mnt->mnt_mp = NULL;
800 }
801
802 /*
803 * vfsmount lock must be held for write
804 */
805 void mnt_set_mountpoint(struct mount *mnt,
806 struct mountpoint *mp,
807 struct mount *child_mnt)
808 {
809 mp->m_count++;
810 mnt_add_count(mnt, 1); /* essentially, that's mntget */
811 child_mnt->mnt_mountpoint = dget(mp->m_dentry);
812 child_mnt->mnt_parent = mnt;
813 child_mnt->mnt_mp = mp;
814 hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
815 }
816
817 /*
818 * vfsmount lock must be held for write
819 */
820 static void attach_mnt(struct mount *mnt,
821 struct mount *parent,
822 struct mountpoint *mp)
823 {
824 mnt_set_mountpoint(parent, mp, mnt);
825 hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
826 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
827 }
828
829 static void attach_shadowed(struct mount *mnt,
830 struct mount *parent,
831 struct mount *shadows)
832 {
833 if (shadows) {
834 hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
835 list_add(&mnt->mnt_child, &shadows->mnt_child);
836 } else {
837 hlist_add_head_rcu(&mnt->mnt_hash,
838 m_hash(&parent->mnt, mnt->mnt_mountpoint));
839 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
840 }
841 }
842
843 /*
844 * vfsmount lock must be held for write
845 */
846 static void commit_tree(struct mount *mnt, struct mount *shadows)
847 {
848 struct mount *parent = mnt->mnt_parent;
849 struct mount *m;
850 LIST_HEAD(head);
851 struct mnt_namespace *n = parent->mnt_ns;
852
853 BUG_ON(parent == mnt);
854
855 list_add_tail(&head, &mnt->mnt_list);
856 list_for_each_entry(m, &head, mnt_list)
857 m->mnt_ns = n;
858
859 list_splice(&head, n->list.prev);
860
861 attach_shadowed(mnt, parent, shadows);
862 touch_mnt_namespace(n);
863 }
864
865 static struct mount *next_mnt(struct mount *p, struct mount *root)
866 {
867 struct list_head *next = p->mnt_mounts.next;
868 if (next == &p->mnt_mounts) {
869 while (1) {
870 if (p == root)
871 return NULL;
872 next = p->mnt_child.next;
873 if (next != &p->mnt_parent->mnt_mounts)
874 break;
875 p = p->mnt_parent;
876 }
877 }
878 return list_entry(next, struct mount, mnt_child);
879 }
880
881 static struct mount *skip_mnt_tree(struct mount *p)
882 {
883 struct list_head *prev = p->mnt_mounts.prev;
884 while (prev != &p->mnt_mounts) {
885 p = list_entry(prev, struct mount, mnt_child);
886 prev = p->mnt_mounts.prev;
887 }
888 return p;
889 }
890
891 struct vfsmount *
892 vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
893 {
894 struct mount *mnt;
895 struct dentry *root;
896
897 if (!type)
898 return ERR_PTR(-ENODEV);
899
900 mnt = alloc_vfsmnt(name);
901 if (!mnt)
902 return ERR_PTR(-ENOMEM);
903
904 if (flags & MS_KERNMOUNT)
905 mnt->mnt.mnt_flags = MNT_INTERNAL;
906
907 root = mount_fs(type, flags, name, data);
908 if (IS_ERR(root)) {
909 mnt_free_id(mnt);
910 free_vfsmnt(mnt);
911 return ERR_CAST(root);
912 }
913
914 mnt->mnt.mnt_root = root;
915 mnt->mnt.mnt_sb = root->d_sb;
916 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
917 mnt->mnt_parent = mnt;
918 lock_mount_hash();
919 list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
920 unlock_mount_hash();
921 return &mnt->mnt;
922 }
923 EXPORT_SYMBOL_GPL(vfs_kern_mount);
924
925 static struct mount *clone_mnt(struct mount *old, struct dentry *root,
926 int flag)
927 {
928 struct super_block *sb = old->mnt.mnt_sb;
929 struct mount *mnt;
930 int err;
931
932 mnt = alloc_vfsmnt(old->mnt_devname);
933 if (!mnt)
934 return ERR_PTR(-ENOMEM);
935
936 if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
937 mnt->mnt_group_id = 0; /* not a peer of original */
938 else
939 mnt->mnt_group_id = old->mnt_group_id;
940
941 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
942 err = mnt_alloc_group_id(mnt);
943 if (err)
944 goto out_free;
945 }
946
947 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
948 /* Don't allow unprivileged users to change mount flags */
949 if (flag & CL_UNPRIVILEGED) {
950 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
951
952 if (mnt->mnt.mnt_flags & MNT_READONLY)
953 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
954
955 if (mnt->mnt.mnt_flags & MNT_NODEV)
956 mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
957
958 if (mnt->mnt.mnt_flags & MNT_NOSUID)
959 mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
960
961 if (mnt->mnt.mnt_flags & MNT_NOEXEC)
962 mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
963 }
964
965 /* Don't allow unprivileged users to reveal what is under a mount */
966 if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
967 mnt->mnt.mnt_flags |= MNT_LOCKED;
968
969 atomic_inc(&sb->s_active);
970 mnt->mnt.mnt_sb = sb;
971 mnt->mnt.mnt_root = dget(root);
972 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
973 mnt->mnt_parent = mnt;
974 lock_mount_hash();
975 list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
976 unlock_mount_hash();
977
978 if ((flag & CL_SLAVE) ||
979 ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
980 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
981 mnt->mnt_master = old;
982 CLEAR_MNT_SHARED(mnt);
983 } else if (!(flag & CL_PRIVATE)) {
984 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
985 list_add(&mnt->mnt_share, &old->mnt_share);
986 if (IS_MNT_SLAVE(old))
987 list_add(&mnt->mnt_slave, &old->mnt_slave);
988 mnt->mnt_master = old->mnt_master;
989 }
990 if (flag & CL_MAKE_SHARED)
991 set_mnt_shared(mnt);
992
993 /* stick the duplicate mount on the same expiry list
994 * as the original if that was on one */
995 if (flag & CL_EXPIRE) {
996 if (!list_empty(&old->mnt_expire))
997 list_add(&mnt->mnt_expire, &old->mnt_expire);
998 }
999
1000 return mnt;
1001
1002 out_free:
1003 mnt_free_id(mnt);
1004 free_vfsmnt(mnt);
1005 return ERR_PTR(err);
1006 }
1007
1008 static void cleanup_mnt(struct mount *mnt)
1009 {
1010 /*
1011 * This probably indicates that somebody messed
1012 * up a mnt_want/drop_write() pair. If this
1013 * happens, the filesystem was probably unable
1014 * to make r/w->r/o transitions.
1015 */
1016 /*
1017 * The locking used to deal with mnt_count decrement provides barriers,
1018 * so mnt_get_writers() below is safe.
1019 */
1020 WARN_ON(mnt_get_writers(mnt));
1021 if (unlikely(mnt->mnt_pins.first))
1022 mnt_pin_kill(mnt);
1023 fsnotify_vfsmount_delete(&mnt->mnt);
1024 dput(mnt->mnt.mnt_root);
1025 deactivate_super(mnt->mnt.mnt_sb);
1026 mnt_free_id(mnt);
1027 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
1028 }
1029
1030 static void __cleanup_mnt(struct rcu_head *head)
1031 {
1032 cleanup_mnt(container_of(head, struct mount, mnt_rcu));
1033 }
1034
1035 static LLIST_HEAD(delayed_mntput_list);
1036 static void delayed_mntput(struct work_struct *unused)
1037 {
1038 struct llist_node *node = llist_del_all(&delayed_mntput_list);
1039 struct llist_node *next;
1040
1041 for (; node; node = next) {
1042 next = llist_next(node);
1043 cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
1044 }
1045 }
1046 static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
1047
1048 static void mntput_no_expire(struct mount *mnt)
1049 {
1050 rcu_read_lock();
1051 mnt_add_count(mnt, -1);
1052 if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
1053 rcu_read_unlock();
1054 return;
1055 }
1056 lock_mount_hash();
1057 if (mnt_get_count(mnt)) {
1058 rcu_read_unlock();
1059 unlock_mount_hash();
1060 return;
1061 }
1062 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
1063 rcu_read_unlock();
1064 unlock_mount_hash();
1065 return;
1066 }
1067 mnt->mnt.mnt_flags |= MNT_DOOMED;
1068 rcu_read_unlock();
1069
1070 list_del(&mnt->mnt_instance);
1071 unlock_mount_hash();
1072
1073 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
1074 struct task_struct *task = current;
1075 if (likely(!(task->flags & PF_KTHREAD))) {
1076 init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
1077 if (!task_work_add(task, &mnt->mnt_rcu, true))
1078 return;
1079 }
1080 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
1081 schedule_delayed_work(&delayed_mntput_work, 1);
1082 return;
1083 }
1084 cleanup_mnt(mnt);
1085 }
1086
1087 void mntput(struct vfsmount *mnt)
1088 {
1089 if (mnt) {
1090 struct mount *m = real_mount(mnt);
1091 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
1092 if (unlikely(m->mnt_expiry_mark))
1093 m->mnt_expiry_mark = 0;
1094 mntput_no_expire(m);
1095 }
1096 }
1097 EXPORT_SYMBOL(mntput);
1098
1099 struct vfsmount *mntget(struct vfsmount *mnt)
1100 {
1101 if (mnt)
1102 mnt_add_count(real_mount(mnt), 1);
1103 return mnt;
1104 }
1105 EXPORT_SYMBOL(mntget);
1106
1107 struct vfsmount *mnt_clone_internal(struct path *path)
1108 {
1109 struct mount *p;
1110 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
1111 if (IS_ERR(p))
1112 return ERR_CAST(p);
1113 p->mnt.mnt_flags |= MNT_INTERNAL;
1114 return &p->mnt;
1115 }
1116
1117 static inline void mangle(struct seq_file *m, const char *s)
1118 {
1119 seq_escape(m, s, " \t\n\\");
1120 }
1121
1122 /*
1123 * Simple .show_options callback for filesystems which don't want to
1124 * implement more complex mount option showing.
1125 *
1126 * See also save_mount_options().
1127 */
1128 int generic_show_options(struct seq_file *m, struct dentry *root)
1129 {
1130 const char *options;
1131
1132 rcu_read_lock();
1133 options = rcu_dereference(root->d_sb->s_options);
1134
1135 if (options != NULL && options[0]) {
1136 seq_putc(m, ',');
1137 mangle(m, options);
1138 }
1139 rcu_read_unlock();
1140
1141 return 0;
1142 }
1143 EXPORT_SYMBOL(generic_show_options);
1144
1145 /*
1146 * If filesystem uses generic_show_options(), this function should be
1147 * called from the fill_super() callback.
1148 *
1149 * The .remount_fs callback usually needs to be handled in a special
1150  * way, to make sure that previous options are not overwritten if the
1151 * remount fails.
1152 *
1153  * Also note that if the filesystem's .remount_fs function doesn't
1154 * reset all options to their default value, but changes only newly
1155 * given options, then the displayed options will not reflect reality
1156 * any more.
1157 */
1158 void save_mount_options(struct super_block *sb, char *options)
1159 {
1160 BUG_ON(sb->s_options);
1161 rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
1162 }
1163 EXPORT_SYMBOL(save_mount_options);
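/*
 * Sketch of the pairing described above for a hypothetical filesystem
 * "foo" (illustrative, not an in-tree user):
 *
 *	static int foo_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		save_mount_options(sb, data);
 *		...
 *	}
 *
 * together with .show_options = generic_show_options in its
 * super_operations.
 */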
1164
1165 void replace_mount_options(struct super_block *sb, char *options)
1166 {
1167 char *old = sb->s_options;
1168 rcu_assign_pointer(sb->s_options, options);
1169 if (old) {
1170 synchronize_rcu();
1171 kfree(old);
1172 }
1173 }
1174 EXPORT_SYMBOL(replace_mount_options);
1175
1176 #ifdef CONFIG_PROC_FS
1177 /* iterator; we want it to have access to namespace_sem, thus here... */
1178 static void *m_start(struct seq_file *m, loff_t *pos)
1179 {
1180 struct proc_mounts *p = proc_mounts(m);
1181
1182 down_read(&namespace_sem);
1183 if (p->cached_event == p->ns->event) {
1184 void *v = p->cached_mount;
1185 if (*pos == p->cached_index)
1186 return v;
1187 if (*pos == p->cached_index + 1) {
1188 v = seq_list_next(v, &p->ns->list, &p->cached_index);
1189 return p->cached_mount = v;
1190 }
1191 }
1192
1193 p->cached_event = p->ns->event;
1194 p->cached_mount = seq_list_start(&p->ns->list, *pos);
1195 p->cached_index = *pos;
1196 return p->cached_mount;
1197 }
1198
1199 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
1200 {
1201 struct proc_mounts *p = proc_mounts(m);
1202
1203 p->cached_mount = seq_list_next(v, &p->ns->list, pos);
1204 p->cached_index = *pos;
1205 return p->cached_mount;
1206 }
1207
1208 static void m_stop(struct seq_file *m, void *v)
1209 {
1210 up_read(&namespace_sem);
1211 }
1212
1213 static int m_show(struct seq_file *m, void *v)
1214 {
1215 struct proc_mounts *p = proc_mounts(m);
1216 struct mount *r = list_entry(v, struct mount, mnt_list);
1217 return p->show(m, &r->mnt);
1218 }
1219
1220 const struct seq_operations mounts_op = {
1221 .start = m_start,
1222 .next = m_next,
1223 .stop = m_stop,
1224 .show = m_show,
1225 };
1226 #endif /* CONFIG_PROC_FS */
1227
1228 /**
1229 * may_umount_tree - check if a mount tree is busy
1230 * @mnt: root of mount tree
1231 *
1232 * This is called to check if a tree of mounts has any
1233 * open files, pwds, chroots or sub mounts that are
1234 * busy.
1235 */
1236 int may_umount_tree(struct vfsmount *m)
1237 {
1238 struct mount *mnt = real_mount(m);
1239 int actual_refs = 0;
1240 int minimum_refs = 0;
1241 struct mount *p;
1242 BUG_ON(!m);
1243
1244 /* write lock needed for mnt_get_count */
1245 lock_mount_hash();
1246 for (p = mnt; p; p = next_mnt(p, mnt)) {
1247 actual_refs += mnt_get_count(p);
1248 minimum_refs += 2;
1249 }
1250 unlock_mount_hash();
1251
1252 if (actual_refs > minimum_refs)
1253 return 0;
1254
1255 return 1;
1256 }
1257
1258 EXPORT_SYMBOL(may_umount_tree);
1259
1260 /**
1261 * may_umount - check if a mount point is busy
1262 * @mnt: root of mount
1263 *
1264 * This is called to check if a mount point has any
1265 * open files, pwds, chroots or sub mounts. If the
1266 * mount has sub mounts this will return busy
1267 * regardless of whether the sub mounts are busy.
1268 *
1269 * Doesn't take quota and stuff into account. IOW, in some cases it will
1270 * give false negatives. The main reason why it's here is that we need
1271 * a non-destructive way to look for easily umountable filesystems.
1272 */
1273 int may_umount(struct vfsmount *mnt)
1274 {
1275 int ret = 1;
1276 down_read(&namespace_sem);
1277 lock_mount_hash();
1278 if (propagate_mount_busy(real_mount(mnt), 2))
1279 ret = 0;
1280 unlock_mount_hash();
1281 up_read(&namespace_sem);
1282 return ret;
1283 }
1284
1285 EXPORT_SYMBOL(may_umount);
1286
1287 static HLIST_HEAD(unmounted); /* protected by namespace_sem */
1288
1289 static void namespace_unlock(void)
1290 {
1291 struct mount *mnt;
1292 struct hlist_head head = unmounted;
1293
1294 if (likely(hlist_empty(&head))) {
1295 up_write(&namespace_sem);
1296 return;
1297 }
1298
1299 head.first->pprev = &head.first;
1300 INIT_HLIST_HEAD(&unmounted);
1301
1302 /* undo decrements we'd done in umount_tree() */
1303 hlist_for_each_entry(mnt, &head, mnt_hash)
1304 if (mnt->mnt_ex_mountpoint.mnt)
1305 mntget(mnt->mnt_ex_mountpoint.mnt);
1306
1307 up_write(&namespace_sem);
1308
1309 synchronize_rcu();
1310
1311 while (!hlist_empty(&head)) {
1312 mnt = hlist_entry(head.first, struct mount, mnt_hash);
1313 hlist_del_init(&mnt->mnt_hash);
1314 if (mnt->mnt_ex_mountpoint.mnt)
1315 path_put(&mnt->mnt_ex_mountpoint);
1316 mntput(&mnt->mnt);
1317 }
1318 }
1319
1320 static inline void namespace_lock(void)
1321 {
1322 down_write(&namespace_sem);
1323 }
1324
1325 /*
1326 * mount_lock must be held
1327 * namespace_sem must be held for write
1328 * how = 0 => just this tree, don't propagate
1329  * how = 1 => propagate; we know that nobody else has a reference to any victims
1330 * how = 2 => lazy umount
1331 */
1332 void umount_tree(struct mount *mnt, int how)
1333 {
1334 HLIST_HEAD(tmp_list);
1335 struct mount *p;
1336 struct mount *last = NULL;
1337
1338 for (p = mnt; p; p = next_mnt(p, mnt)) {
1339 hlist_del_init_rcu(&p->mnt_hash);
1340 hlist_add_head(&p->mnt_hash, &tmp_list);
1341 }
1342
1343 hlist_for_each_entry(p, &tmp_list, mnt_hash)
1344 list_del_init(&p->mnt_child);
1345
1346 if (how)
1347 propagate_umount(&tmp_list);
1348
1349 hlist_for_each_entry(p, &tmp_list, mnt_hash) {
1350 list_del_init(&p->mnt_expire);
1351 list_del_init(&p->mnt_list);
1352 __touch_mnt_namespace(p->mnt_ns);
1353 p->mnt_ns = NULL;
1354 if (how < 2)
1355 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1356 if (mnt_has_parent(p)) {
1357 hlist_del_init(&p->mnt_mp_list);
1358 put_mountpoint(p->mnt_mp);
1359 mnt_add_count(p->mnt_parent, -1);
1360 /* move the reference to mountpoint into ->mnt_ex_mountpoint */
1361 p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
1362 p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
1363 p->mnt_mountpoint = p->mnt.mnt_root;
1364 p->mnt_parent = p;
1365 p->mnt_mp = NULL;
1366 }
1367 change_mnt_propagation(p, MS_PRIVATE);
1368 last = p;
1369 }
1370 if (last) {
1371 last->mnt_hash.next = unmounted.first;
1372 unmounted.first = tmp_list.first;
1373 unmounted.first->pprev = &unmounted.first;
1374 }
1375 }
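/*
 * How the "how" values above are used in this file (a summary sketch):
 * do_umount() passes 1 for a plain umount and 2 for MNT_DETACH,
 * __detach_mounts() passes 2 (lazy), while drop_collected_mounts() and
 * the error-unwind paths pass 0.
 */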
1376
1377 static void shrink_submounts(struct mount *mnt);
1378
1379 static int do_umount(struct mount *mnt, int flags)
1380 {
1381 struct super_block *sb = mnt->mnt.mnt_sb;
1382 int retval;
1383
1384 retval = security_sb_umount(&mnt->mnt, flags);
1385 if (retval)
1386 return retval;
1387
1388 /*
1389 * Allow userspace to request a mountpoint be expired rather than
1390 * unmounting unconditionally. Unmount only happens if:
1391 * (1) the mark is already set (the mark is cleared by mntput())
1392 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1393 */
1394 if (flags & MNT_EXPIRE) {
1395 if (&mnt->mnt == current->fs->root.mnt ||
1396 flags & (MNT_FORCE | MNT_DETACH))
1397 return -EINVAL;
1398
1399 /*
1400 * probably don't strictly need the lock here if we examined
1401 * all race cases, but it's a slowpath.
1402 */
1403 lock_mount_hash();
1404 if (mnt_get_count(mnt) != 2) {
1405 unlock_mount_hash();
1406 return -EBUSY;
1407 }
1408 unlock_mount_hash();
1409
1410 if (!xchg(&mnt->mnt_expiry_mark, 1))
1411 return -EAGAIN;
1412 }
1413
1414 /*
1415 * If we may have to abort operations to get out of this
1416 * mount, and they will themselves hold resources we must
1417 * allow the fs to do things. In the Unix tradition of
1418  * 'Gee that's tricky, let's do it in userspace' the umount_begin
1419  * might fail to complete on the first run through as other tasks
1420  * must return, and the like. That's for the mount program to worry
1421 * about for the moment.
1422 */
1423
1424 if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1425 sb->s_op->umount_begin(sb);
1426 }
1427
1428 /*
1429 * No sense to grab the lock for this test, but test itself looks
1430 * somewhat bogus. Suggestions for better replacement?
1431 * Ho-hum... In principle, we might treat that as umount + switch
1432 * to rootfs. GC would eventually take care of the old vfsmount.
1433 * Actually it makes sense, especially if rootfs would contain a
1434 * /reboot - static binary that would close all descriptors and
1435  * call reboot(2). Then init(8) could umount root and exec /reboot.
1436 */
1437 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1438 /*
1439 * Special case for "unmounting" root ...
1440 * we just try to remount it readonly.
1441 */
1442 if (!capable(CAP_SYS_ADMIN))
1443 return -EPERM;
1444 down_write(&sb->s_umount);
1445 if (!(sb->s_flags & MS_RDONLY))
1446 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
1447 up_write(&sb->s_umount);
1448 return retval;
1449 }
1450
1451 namespace_lock();
1452 lock_mount_hash();
1453 event++;
1454
1455 if (flags & MNT_DETACH) {
1456 if (!list_empty(&mnt->mnt_list))
1457 umount_tree(mnt, 2);
1458 retval = 0;
1459 } else {
1460 shrink_submounts(mnt);
1461 retval = -EBUSY;
1462 if (!propagate_mount_busy(mnt, 2)) {
1463 if (!list_empty(&mnt->mnt_list))
1464 umount_tree(mnt, 1);
1465 retval = 0;
1466 }
1467 }
1468 unlock_mount_hash();
1469 namespace_unlock();
1470 return retval;
1471 }
1472
1473 /*
1474 * __detach_mounts - lazily unmount all mounts on the specified dentry
1475 *
1476  * During unlink, rmdir, and d_drop it is possible to lose the path
1477 * to an existing mountpoint, and wind up leaking the mount.
1478 * detach_mounts allows lazily unmounting those mounts instead of
1479 * leaking them.
1480 *
1481 * The caller may hold dentry->d_inode->i_mutex.
1482 */
1483 void __detach_mounts(struct dentry *dentry)
1484 {
1485 struct mountpoint *mp;
1486 struct mount *mnt;
1487
1488 namespace_lock();
1489 mp = lookup_mountpoint(dentry);
1490 if (!mp)
1491 goto out_unlock;
1492
1493 lock_mount_hash();
1494 while (!hlist_empty(&mp->m_list)) {
1495 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1496 umount_tree(mnt, 2);
1497 }
1498 unlock_mount_hash();
1499 put_mountpoint(mp);
1500 out_unlock:
1501 namespace_unlock();
1502 }
1503
1504 /*
1505 * Is the caller allowed to modify his namespace?
1506 */
1507 static inline bool may_mount(void)
1508 {
1509 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
1510 }
1511
1512 /*
1513 * Now umount can handle mount points as well as block devices.
1514 * This is important for filesystems which use unnamed block devices.
1515 *
1516 * We now support a flag for forced unmount like the other 'big iron'
1517 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
1518 */
1519
1520 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1521 {
1522 struct path path;
1523 struct mount *mnt;
1524 int retval;
1525 int lookup_flags = 0;
1526
1527 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1528 return -EINVAL;
1529
1530 if (!may_mount())
1531 return -EPERM;
1532
1533 if (!(flags & UMOUNT_NOFOLLOW))
1534 lookup_flags |= LOOKUP_FOLLOW;
1535
1536 retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
1537 if (retval)
1538 goto out;
1539 mnt = real_mount(path.mnt);
1540 retval = -EINVAL;
1541 if (path.dentry != path.mnt->mnt_root)
1542 goto dput_and_out;
1543 if (!check_mnt(mnt))
1544 goto dput_and_out;
1545 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1546 goto dput_and_out;
1547
1548 retval = do_umount(mnt, flags);
1549 dput_and_out:
1550 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
1551 dput(path.dentry);
1552 mntput_no_expire(mnt);
1553 out:
1554 return retval;
1555 }
1556
1557 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
1558
1559 /*
1560 * The 2.0 compatible umount. No flags.
1561 */
1562 SYSCALL_DEFINE1(oldumount, char __user *, name)
1563 {
1564 return sys_umount(name, 0);
1565 }
1566
1567 #endif
1568
1569 static bool is_mnt_ns_file(struct dentry *dentry)
1570 {
1571 /* Is this a proxy for a mount namespace? */
1572 struct inode *inode = dentry->d_inode;
1573 return proc_ns_inode(inode) && dentry->d_fsdata == &mntns_operations;
1574 }
1575
1576 struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
1577 {
1578 return container_of(ns, struct mnt_namespace, ns);
1579 }
1580
1581 static bool mnt_ns_loop(struct dentry *dentry)
1582 {
1583 /* Could bind mounting the mount namespace inode cause a
1584 * mount namespace loop?
1585 */
1586 struct mnt_namespace *mnt_ns;
1587 if (!is_mnt_ns_file(dentry))
1588 return false;
1589
1590 mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
1591 return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
1592 }
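/*
 * Example of the check above (sketch): a task whose mount namespace
 * has sequence number 5 may bind-mount the nsfs file of a namespace
 * numbered 8, but not one numbered 5 or lower; allowing only "newer"
 * targets makes a reference cycle between namespaces impossible.
 */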
1593
1594 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1595 int flag)
1596 {
1597 struct mount *res, *p, *q, *r, *parent;
1598
1599 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1600 return ERR_PTR(-EINVAL);
1601
1602 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
1603 return ERR_PTR(-EINVAL);
1604
1605 res = q = clone_mnt(mnt, dentry, flag);
1606 if (IS_ERR(q))
1607 return q;
1608
1609 q->mnt.mnt_flags &= ~MNT_LOCKED;
1610 q->mnt_mountpoint = mnt->mnt_mountpoint;
1611
1612 p = mnt;
1613 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1614 struct mount *s;
1615 if (!is_subdir(r->mnt_mountpoint, dentry))
1616 continue;
1617
1618 for (s = r; s; s = next_mnt(s, r)) {
1619 struct mount *t = NULL;
1620 if (!(flag & CL_COPY_UNBINDABLE) &&
1621 IS_MNT_UNBINDABLE(s)) {
1622 s = skip_mnt_tree(s);
1623 continue;
1624 }
1625 if (!(flag & CL_COPY_MNT_NS_FILE) &&
1626 is_mnt_ns_file(s->mnt.mnt_root)) {
1627 s = skip_mnt_tree(s);
1628 continue;
1629 }
1630 while (p != s->mnt_parent) {
1631 p = p->mnt_parent;
1632 q = q->mnt_parent;
1633 }
1634 p = s;
1635 parent = q;
1636 q = clone_mnt(p, p->mnt.mnt_root, flag);
1637 if (IS_ERR(q))
1638 goto out;
1639 lock_mount_hash();
1640 list_add_tail(&q->mnt_list, &res->mnt_list);
1641 mnt_set_mountpoint(parent, p->mnt_mp, q);
1642 if (!list_empty(&parent->mnt_mounts)) {
1643 t = list_last_entry(&parent->mnt_mounts,
1644 struct mount, mnt_child);
1645 if (t->mnt_mp != p->mnt_mp)
1646 t = NULL;
1647 }
1648 attach_shadowed(q, parent, t);
1649 unlock_mount_hash();
1650 }
1651 }
1652 return res;
1653 out:
1654 if (res) {
1655 lock_mount_hash();
1656 umount_tree(res, 0);
1657 unlock_mount_hash();
1658 }
1659 return q;
1660 }
1661
1662 /* Caller should check returned pointer for errors */
1663
1664 struct vfsmount *collect_mounts(struct path *path)
1665 {
1666 struct mount *tree;
1667 namespace_lock();
1668 tree = copy_tree(real_mount(path->mnt), path->dentry,
1669 CL_COPY_ALL | CL_PRIVATE);
1670 namespace_unlock();
1671 if (IS_ERR(tree))
1672 return ERR_CAST(tree);
1673 return &tree->mnt;
1674 }
1675
1676 void drop_collected_mounts(struct vfsmount *mnt)
1677 {
1678 namespace_lock();
1679 lock_mount_hash();
1680 umount_tree(real_mount(mnt), 0);
1681 unlock_mount_hash();
1682 namespace_unlock();
1683 }
1684
1685 /**
1686 * clone_private_mount - create a private clone of a path
1687 *
1688  * This creates a new vfsmount, which will be a clone of @path. The new
1689  * mount will not be attached anywhere in the namespace and will be private
1690  * (i.e. changes to the originating mount won't be propagated into it).
1691 *
1692 * Release with mntput().
1693 */
1694 struct vfsmount *clone_private_mount(struct path *path)
1695 {
1696 struct mount *old_mnt = real_mount(path->mnt);
1697 struct mount *new_mnt;
1698
1699 if (IS_MNT_UNBINDABLE(old_mnt))
1700 return ERR_PTR(-EINVAL);
1701
1702 down_read(&namespace_sem);
1703 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
1704 up_read(&namespace_sem);
1705 if (IS_ERR(new_mnt))
1706 return ERR_CAST(new_mnt);
1707
1708 return &new_mnt->mnt;
1709 }
1710 EXPORT_SYMBOL_GPL(clone_private_mount);
1711
1712 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1713 struct vfsmount *root)
1714 {
1715 struct mount *mnt;
1716 int res = f(root, arg);
1717 if (res)
1718 return res;
1719 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
1720 res = f(&mnt->mnt, arg);
1721 if (res)
1722 return res;
1723 }
1724 return 0;
1725 }
1726
1727 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
1728 {
1729 struct mount *p;
1730
1731 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
1732 if (p->mnt_group_id && !IS_MNT_SHARED(p))
1733 mnt_release_group_id(p);
1734 }
1735 }
1736
1737 static int invent_group_ids(struct mount *mnt, bool recurse)
1738 {
1739 struct mount *p;
1740
1741 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
1742 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
1743 int err = mnt_alloc_group_id(p);
1744 if (err) {
1745 cleanup_group_ids(mnt, p);
1746 return err;
1747 }
1748 }
1749 }
1750
1751 return 0;
1752 }
1753
1754 /*
1755 * @source_mnt : mount tree to be attached
1756  * @dest_mnt/@dest_mp : place where the mount tree @source_mnt is attached
1757  * @parent_path : if non-null, detach the source_mnt from its parent and
1758 * store the parent mount and mountpoint dentry.
1759 * (done when source_mnt is moved)
1760 *
1761  * NOTE: the table below explains the semantics when a source mount
1762 * of a given type is attached to a destination mount of a given type.
1763 * ---------------------------------------------------------------------------
1764 * | BIND MOUNT OPERATION |
1765 * |**************************************************************************
1766 * | source-->| shared | private | slave | unbindable |
1767 * | dest | | | | |
1768 * | | | | | | |
1769 * | v | | | | |
1770 * |**************************************************************************
1771 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
1772 * | | | | | |
1773 * |non-shared| shared (+) | private | slave (*) | invalid |
1774 * ***************************************************************************
1775 * A bind operation clones the source mount and mounts the clone on the
1776 * destination mount.
1777 *
1778 * (++) the cloned mount is propagated to all the mounts in the propagation
1779 * tree of the destination mount and the cloned mount is added to
1780 * the peer group of the source mount.
1781 * (+) the cloned mount is created under the destination mount and is marked
1782 * as shared. The cloned mount is added to the peer group of the source
1783 * mount.
1784 * (+++) the mount is propagated to all the mounts in the propagation tree
1785 * of the destination mount and the cloned mount is made slave
1786 * of the same master as that of the source mount. The cloned mount
1787 * is marked as 'shared and slave'.
1788 * (*) the cloned mount is made a slave of the same master as that of the
1789 * source mount.
1790 *
1791 * ---------------------------------------------------------------------------
1792 * | MOVE MOUNT OPERATION |
1793 * |**************************************************************************
1794 * | source-->| shared | private | slave | unbindable |
1795 * | dest | | | | |
1796 * | | | | | | |
1797 * | v | | | | |
1798 * |**************************************************************************
1799 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
1800 * | | | | | |
1801 * |non-shared| shared (+*) | private | slave (*) | unbindable |
1802 * ***************************************************************************
1803 *
1804 * (+) the mount is moved to the destination. And is then propagated to
1805 * all the mounts in the propagation tree of the destination mount.
1806 * (+*) the mount is moved to the destination.
1807 * (+++) the mount is moved to the destination and is then propagated to
1808 * all the mounts belonging to the destination mount's propagation tree.
1809 * the mount is marked as 'shared and slave'.
1810 * (*) the mount continues to be a slave at the new location.
1811 *
1812  * if the source mount is a tree, the operations explained above are
1813  * applied to each mount in the tree.
1814 * Must be called without spinlocks held, since this function can sleep
1815 * in allocations.
1816 */
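/*
 * Shell-level illustration of binding from a shared source (a sketch):
 *
 *	mount --make-shared /mnt
 *	mount --bind /mnt /tmp/a	# the clone joins /mnt's peer group
 *
 * A mount added later under either of the two now propagates to the
 * other, because both belong to the same peer group.
 */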
1817 static int attach_recursive_mnt(struct mount *source_mnt,
1818 struct mount *dest_mnt,
1819 struct mountpoint *dest_mp,
1820 struct path *parent_path)
1821 {
1822 HLIST_HEAD(tree_list);
1823 struct mount *child, *p;
1824 struct hlist_node *n;
1825 int err;
1826
1827 if (IS_MNT_SHARED(dest_mnt)) {
1828 err = invent_group_ids(source_mnt, true);
1829 if (err)
1830 goto out;
1831 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
1832 lock_mount_hash();
1833 if (err)
1834 goto out_cleanup_ids;
1835 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1836 set_mnt_shared(p);
1837 } else {
1838 lock_mount_hash();
1839 }
1840 if (parent_path) {
1841 detach_mnt(source_mnt, parent_path);
1842 attach_mnt(source_mnt, dest_mnt, dest_mp);
1843 touch_mnt_namespace(source_mnt->mnt_ns);
1844 } else {
1845 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
1846 commit_tree(source_mnt, NULL);
1847 }
1848
1849 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
1850 struct mount *q;
1851 hlist_del_init(&child->mnt_hash);
1852 q = __lookup_mnt_last(&child->mnt_parent->mnt,
1853 child->mnt_mountpoint);
1854 commit_tree(child, q);
1855 }
1856 unlock_mount_hash();
1857
1858 return 0;
1859
1860 out_cleanup_ids:
1861 while (!hlist_empty(&tree_list)) {
1862 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
1863 umount_tree(child, 0);
1864 }
1865 unlock_mount_hash();
1866 cleanup_group_ids(source_mnt, NULL);
1867 out:
1868 return err;
1869 }
1870
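/*
 * Lock the mountpoint at @path: take the dentry's i_mutex and
 * namespace_sem, then check whether something is already mounted
 * there; if so, step onto the top-most mount's root and retry, so
 * that we end up holding the locks for the final mountpoint.
 */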
1871 static struct mountpoint *lock_mount(struct path *path)
1872 {
1873 struct vfsmount *mnt;
1874 struct dentry *dentry = path->dentry;
1875 retry:
1876 mutex_lock(&dentry->d_inode->i_mutex);
1877 if (unlikely(cant_mount(dentry))) {
1878 mutex_unlock(&dentry->d_inode->i_mutex);
1879 return ERR_PTR(-ENOENT);
1880 }
1881 namespace_lock();
1882 mnt = lookup_mnt(path);
1883 if (likely(!mnt)) {
1884 struct mountpoint *mp = lookup_mountpoint(dentry);
1885 if (!mp)
1886 mp = new_mountpoint(dentry);
1887 if (IS_ERR(mp)) {
1888 namespace_unlock();
1889 mutex_unlock(&dentry->d_inode->i_mutex);
1890 return mp;
1891 }
1892 return mp;
1893 }
1894 namespace_unlock();
1895 mutex_unlock(&path->dentry->d_inode->i_mutex);
1896 path_put(path);
1897 path->mnt = mnt;
1898 dentry = path->dentry = dget(mnt->mnt_root);
1899 goto retry;
1900 }
1901
1902 static void unlock_mount(struct mountpoint *where)
1903 {
1904 struct dentry *dentry = where->m_dentry;
1905 put_mountpoint(where);
1906 namespace_unlock();
1907 mutex_unlock(&dentry->d_inode->i_mutex);
1908 }
1909
1910 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
1911 {
1912 if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
1913 return -EINVAL;
1914
1915 if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
1916 S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
1917 return -ENOTDIR;
1918
1919 return attach_recursive_mnt(mnt, p, mp, NULL);
1920 }
1921
1922 /*
1923 * Sanity check the flags to change_mnt_propagation.
1924 */
1925
1926 static int flags_to_propagation_type(int flags)
1927 {
1928 int type = flags & ~(MS_REC | MS_SILENT);
1929
1930 /* Fail if any non-propagation flags are set */
1931 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
1932 return 0;
1933 /* Only one propagation flag should be set */
1934 if (!is_power_of_2(type))
1935 return 0;
1936 return type;
1937 }
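/*
 * Example (sketch): MS_SHARED|MS_REC yields MS_SHARED (recursion is
 * handled separately by the caller), while MS_SHARED|MS_SLAVE leaves
 * two bits set, fails the power-of-2 test and returns 0, which
 * do_change_type() turns into -EINVAL.
 */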
1938
1939 /*
1940 * recursively change the type of the mountpoint.
1941 */
1942 static int do_change_type(struct path *path, int flag)
1943 {
1944 struct mount *m;
1945 struct mount *mnt = real_mount(path->mnt);
1946 int recurse = flag & MS_REC;
1947 int type;
1948 int err = 0;
1949
1950 if (path->dentry != path->mnt->mnt_root)
1951 return -EINVAL;
1952
1953 type = flags_to_propagation_type(flag);
1954 if (!type)
1955 return -EINVAL;
1956
1957 namespace_lock();
1958 if (type == MS_SHARED) {
1959 err = invent_group_ids(mnt, recurse);
1960 if (err)
1961 goto out_unlock;
1962 }
1963
1964 lock_mount_hash();
1965 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1966 change_mnt_propagation(m, type);
1967 unlock_mount_hash();
1968
1969 out_unlock:
1970 namespace_unlock();
1971 return err;
1972 }
1973
1974 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1975 {
1976 struct mount *child;
1977 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1978 if (!is_subdir(child->mnt_mountpoint, dentry))
1979 continue;
1980
1981 if (child->mnt.mnt_flags & MNT_LOCKED)
1982 return true;
1983 }
1984 return false;
1985 }
1986
1987 /*
1988 * do loopback mount.
1989 */
1990 static int do_loopback(struct path *path, const char *old_name,
1991 int recurse)
1992 {
1993 struct path old_path;
1994 struct mount *mnt = NULL, *old, *parent;
1995 struct mountpoint *mp;
1996 int err;
1997 if (!old_name || !*old_name)
1998 return -EINVAL;
1999 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2000 if (err)
2001 return err;
2002
2003 err = -EINVAL;
2004 if (mnt_ns_loop(old_path.dentry))
2005 goto out;
2006
2007 mp = lock_mount(path);
2008 err = PTR_ERR(mp);
2009 if (IS_ERR(mp))
2010 goto out;
2011
2012 old = real_mount(old_path.mnt);
2013 parent = real_mount(path->mnt);
2014
2015 err = -EINVAL;
2016 if (IS_MNT_UNBINDABLE(old))
2017 goto out2;
2018
2019 if (!check_mnt(parent) || !check_mnt(old))
2020 goto out2;
2021
2022 if (!recurse && has_locked_children(old, old_path.dentry))
2023 goto out2;
2024
2025 if (recurse)
2026 mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
2027 else
2028 mnt = clone_mnt(old, old_path.dentry, 0);
2029
2030 if (IS_ERR(mnt)) {
2031 err = PTR_ERR(mnt);
2032 goto out2;
2033 }
2034
2035 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2036
2037 err = graft_tree(mnt, parent, mp);
2038 if (err) {
2039 lock_mount_hash();
2040 umount_tree(mnt, 0);
2041 unlock_mount_hash();
2042 }
2043 out2:
2044 unlock_mount(mp);
2045 out:
2046 path_put(&old_path);
2047 return err;
2048 }
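/*
 * Editor's sketch (not part of the original source): do_loopback()
 * backs MS_BIND requests. With placeholder paths /src and /dst, this
 * makes /dst an alias of the tree at /src; adding MS_REC takes the
 * copy_tree() path above and carries submounts along:
 *
 *    #include <stdio.h>
 *    #include <sys/mount.h>
 *
 *    int main(void)
 *    {
 *        if (mount("/src", "/dst", NULL, MS_BIND, NULL)) {
 *            perror("bind mount");
 *            return 1;
 *        }
 *        return 0;
 *    }
 */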
2049
2050 static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
2051 {
2052 int error = 0;
2053 int readonly_request = 0;
2054
2055 if (ms_flags & MS_RDONLY)
2056 readonly_request = 1;
2057 if (readonly_request == __mnt_is_readonly(mnt))
2058 return 0;
2059
2060 if (readonly_request)
2061 error = mnt_make_readonly(real_mount(mnt));
2062 else
2063 __mnt_unmake_readonly(real_mount(mnt));
2064 return error;
2065 }
2066
2067 /*
2068 * change filesystem flags. dir should be the physical root of the filesystem.
2069 * If you've mounted a non-root directory somewhere and want to do remount
2070 * on it - tough luck.
2071 */
2072 static int do_remount(struct path *path, int flags, int mnt_flags,
2073 void *data)
2074 {
2075 int err;
2076 struct super_block *sb = path->mnt->mnt_sb;
2077 struct mount *mnt = real_mount(path->mnt);
2078
2079 if (!check_mnt(mnt))
2080 return -EINVAL;
2081
2082 if (path->dentry != path->mnt->mnt_root)
2083 return -EINVAL;
2084
2085 /* Don't allow changing of locked mnt flags.
2086 *
2087 * No locks need to be held here while testing the various
2088 * MNT_LOCK flags because those flags can never be cleared
2089 * once they are set.
2090 */
2091 if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
2092 !(mnt_flags & MNT_READONLY)) {
2093 return -EPERM;
2094 }
2095 if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
2096 !(mnt_flags & MNT_NODEV)) {
2097 return -EPERM;
2098 }
2099 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
2100 !(mnt_flags & MNT_NOSUID)) {
2101 return -EPERM;
2102 }
2103 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
2104 !(mnt_flags & MNT_NOEXEC)) {
2105 return -EPERM;
2106 }
2107 if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
2108 ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
2109 return -EPERM;
2110 }
2111
2112 err = security_sb_remount(sb, data);
2113 if (err)
2114 return err;
2115
2116 down_write(&sb->s_umount);
2117 if (flags & MS_BIND)
2118 err = change_mount_flags(path->mnt, flags);
2119 else if (!capable(CAP_SYS_ADMIN))
2120 err = -EPERM;
2121 else
2122 err = do_remount_sb(sb, flags, data, 0);
2123 if (!err) {
2124 lock_mount_hash();
2125 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2126 mnt->mnt.mnt_flags = mnt_flags;
2127 touch_mnt_namespace(mnt->mnt_ns);
2128 unlock_mount_hash();
2129 }
2130 up_write(&sb->s_umount);
2131 return err;
2132 }
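/*
 * Editor's sketch (not part of the original source): do_remount()
 * serves MS_REMOUNT. When MS_BIND is also set, only the per-mountpoint
 * flags change (change_mount_flags() above); otherwise do_remount_sb()
 * asks the filesystem itself. The classic read-only bind mount is the
 * two-step dance below (placeholder paths):
 *
 *    mount("/src", "/dst", NULL, MS_BIND, NULL);
 *    mount(NULL, "/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 *
 * The MNT_LOCK_* checks above keep a less privileged mount namespace
 * from clearing flags that its creator locked in place.
 */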
2133
2134 static inline int tree_contains_unbindable(struct mount *mnt)
2135 {
2136 struct mount *p;
2137 for (p = mnt; p; p = next_mnt(p, mnt)) {
2138 if (IS_MNT_UNBINDABLE(p))
2139 return 1;
2140 }
2141 return 0;
2142 }
2143
2144 static int do_move_mount(struct path *path, const char *old_name)
2145 {
2146 struct path old_path, parent_path;
2147 struct mount *p;
2148 struct mount *old;
2149 struct mountpoint *mp;
2150 int err;
2151 if (!old_name || !*old_name)
2152 return -EINVAL;
2153 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
2154 if (err)
2155 return err;
2156
2157 mp = lock_mount(path);
2158 err = PTR_ERR(mp);
2159 if (IS_ERR(mp))
2160 goto out;
2161
2162 old = real_mount(old_path.mnt);
2163 p = real_mount(path->mnt);
2164
2165 err = -EINVAL;
2166 if (!check_mnt(p) || !check_mnt(old))
2167 goto out1;
2168
2169 if (old->mnt.mnt_flags & MNT_LOCKED)
2170 goto out1;
2171
2172 err = -EINVAL;
2173 if (old_path.dentry != old_path.mnt->mnt_root)
2174 goto out1;
2175
2176 if (!mnt_has_parent(old))
2177 goto out1;
2178
2179 if (S_ISDIR(path->dentry->d_inode->i_mode) !=
2180 S_ISDIR(old_path.dentry->d_inode->i_mode))
2181 goto out1;
2182 /*
2183 * Don't move a mount residing in a shared parent.
2184 */
2185 if (IS_MNT_SHARED(old->mnt_parent))
2186 goto out1;
2187 /*
2188 * Don't move a mount tree containing unbindable mounts to a destination
2189 * mount which is shared.
2190 */
2191 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2192 goto out1;
2193 err = -ELOOP;
2194 for (; mnt_has_parent(p); p = p->mnt_parent)
2195 if (p == old)
2196 goto out1;
2197
2198 err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
2199 if (err)
2200 goto out1;
2201
2202 /* if the mount is moved, it should no longer be expired
2203 * automatically */
2204 list_del_init(&old->mnt_expire);
2205 out1:
2206 unlock_mount(mp);
2207 out:
2208 if (!err)
2209 path_put(&parent_path);
2210 path_put(&old_path);
2211 return err;
2212 }
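/*
 * Editor's sketch (not part of the original source): do_move_mount()
 * implements MS_MOVE, which detaches an existing mount and reattaches
 * it elsewhere. With placeholder paths, and provided the old mount
 * does not sit in a shared parent (checked above):
 *
 *    mount("/old/mnt", "/new/mnt", NULL, MS_MOVE, NULL);
 */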
2213
2214 static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
2215 {
2216 int err;
2217 const char *subtype = strchr(fstype, '.');
2218 if (subtype) {
2219 subtype++;
2220 err = -EINVAL;
2221 if (!subtype[0])
2222 goto err;
2223 } else
2224 subtype = "";
2225
2226 mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
2227 err = -ENOMEM;
2228 if (!mnt->mnt_sb->s_subtype)
2229 goto err;
2230 return mnt;
2231
2232 err:
2233 mntput(mnt);
2234 return ERR_PTR(err);
2235 }
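/*
 * Editor's note (not part of the original source): the subtype is
 * whatever follows the first dot in the requested fstype, so mounting
 * with type "fuse.sshfs" leaves s_subtype set to "sshfs" and the mount
 * shows up as "fuse.sshfs" in /proc/mounts.
 */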
2236
2237 /*
2238 * add a mount into a namespace's mount tree
2239 */
2240 static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
2241 {
2242 struct mountpoint *mp;
2243 struct mount *parent;
2244 int err;
2245
2246 mnt_flags &= ~MNT_INTERNAL_FLAGS;
2247
2248 mp = lock_mount(path);
2249 if (IS_ERR(mp))
2250 return PTR_ERR(mp);
2251
2252 parent = real_mount(path->mnt);
2253 err = -EINVAL;
2254 if (unlikely(!check_mnt(parent))) {
2255 /* that's acceptable only for automounts done in private ns */
2256 if (!(mnt_flags & MNT_SHRINKABLE))
2257 goto unlock;
2258 /* ... and for those we'd better have mountpoint still alive */
2259 if (!parent->mnt_ns)
2260 goto unlock;
2261 }
2262
2263 /* Refuse the same filesystem on the same mount point */
2264 err = -EBUSY;
2265 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2266 path->mnt->mnt_root == path->dentry)
2267 goto unlock;
2268
2269 err = -EINVAL;
2270 if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
2271 goto unlock;
2272
2273 newmnt->mnt.mnt_flags = mnt_flags;
2274 err = graft_tree(newmnt, parent, mp);
2275
2276 unlock:
2277 unlock_mount(mp);
2278 return err;
2279 }
2280
2281 /*
2282 * create a new mount for userspace and request it to be added into the
2283 * namespace's tree
2284 */
2285 static int do_new_mount(struct path *path, const char *fstype, int flags,
2286 int mnt_flags, const char *name, void *data)
2287 {
2288 struct file_system_type *type;
2289 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2290 struct vfsmount *mnt;
2291 int err;
2292
2293 if (!fstype)
2294 return -EINVAL;
2295
2296 type = get_fs_type(fstype);
2297 if (!type)
2298 return -ENODEV;
2299
2300 if (user_ns != &init_user_ns) {
2301 if (!(type->fs_flags & FS_USERNS_MOUNT)) {
2302 put_filesystem(type);
2303 return -EPERM;
2304 }
2305 /* Only in special cases do we allow devices from mounts
2306 * created outside the initial user namespace.
2307 */
2308 if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2309 flags |= MS_NODEV;
2310 mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
2311 }
2312 }
2313
2314 mnt = vfs_kern_mount(type, flags, name, data);
2315 if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
2316 !mnt->mnt_sb->s_subtype)
2317 mnt = fs_set_subtype(mnt, fstype);
2318
2319 put_filesystem(type);
2320 if (IS_ERR(mnt))
2321 return PTR_ERR(mnt);
2322
2323 err = do_add_mount(real_mount(mnt), path, mnt_flags);
2324 if (err)
2325 mntput(mnt);
2326 return err;
2327 }
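/*
 * Editor's sketch (not part of the original source): do_new_mount() is
 * the default case of do_mount() below -- attach a freshly mounted
 * filesystem at the given path. A plain tmpfs mount, with placeholder
 * mountpoint and options:
 *
 *    mount("none", "/mnt/scratch", "tmpfs", 0, "size=16m");
 */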
2328
2329 int finish_automount(struct vfsmount *m, struct path *path)
2330 {
2331 struct mount *mnt = real_mount(m);
2332 int err;
2333 /* The new mount record should have at least 2 refs to prevent it from
2334 * being expired before we get a chance to add it
2335 */
2336 BUG_ON(mnt_get_count(mnt) < 2);
2337
2338 if (m->mnt_sb == path->mnt->mnt_sb &&
2339 m->mnt_root == path->dentry) {
2340 err = -ELOOP;
2341 goto fail;
2342 }
2343
2344 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2345 if (!err)
2346 return 0;
2347 fail:
2348 /* remove m from any expiration list it may be on */
2349 if (!list_empty(&mnt->mnt_expire)) {
2350 namespace_lock();
2351 list_del_init(&mnt->mnt_expire);
2352 namespace_unlock();
2353 }
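/* drop the two references noted in the BUG_ON above; the mount
 * is not being added to the tree */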
2354 mntput(m);
2355 mntput(m);
2356 return err;
2357 }
2358
2359 /**
2360 * mnt_set_expiry - Put a mount on an expiration list
2361 * @mnt: The mount to list.
2362 * @expiry_list: The list to add the mount to.
2363 */
2364 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2365 {
2366 namespace_lock();
2367
2368 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2369
2370 namespace_unlock();
2371 }
2372 EXPORT_SYMBOL(mnt_set_expiry);
2373
2374 /*
2375 * process a list of expirable mountpoints with the intent of discarding any
2376 * mountpoints that aren't in use and haven't been touched since last we came
2377 * here
2378 */
2379 void mark_mounts_for_expiry(struct list_head *mounts)
2380 {
2381 struct mount *mnt, *next;
2382 LIST_HEAD(graveyard);
2383
2384 if (list_empty(mounts))
2385 return;
2386
2387 namespace_lock();
2388 lock_mount_hash();
2389
2390 /* extract from the expiration list every vfsmount that matches the
2391 * following criteria:
2392 * - only referenced by its parent vfsmount
2393 * - still marked for expiry (marked on the last call here; marks are
2394 * cleared by mntput())
2395 */
2396 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2397 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2398 propagate_mount_busy(mnt, 1))
2399 continue;
2400 list_move(&mnt->mnt_expire, &graveyard);
2401 }
2402 while (!list_empty(&graveyard)) {
2403 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2404 touch_mnt_namespace(mnt->mnt_ns);
2405 umount_tree(mnt, 1);
2406 }
2407 unlock_mount_hash();
2408 namespace_unlock();
2409 }
2410
2411 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
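/*
 * Editor's sketch (not part of the original source): a filesystem that
 * creates automounts (NFS and AFS do) typically puts each one on a
 * private list with mnt_set_expiry() and calls mark_mounts_for_expiry()
 * from a periodic worker; a mount still unused on the second
 * consecutive pass gets unmounted. Roughly, with hypothetical names:
 *
 *    static LIST_HEAD(example_automount_list);
 *
 *    static void example_expiry_worker(struct work_struct *work)
 *    {
 *        mark_mounts_for_expiry(&example_automount_list);
 *        // re-arm the delayed work that runs this function
 *    }
 */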
2412
2413 /*
2414 * Ripoff of 'select_parent()'
2415 *
2416 * search the list of submounts for a given mountpoint, and move any
2417 * shrinkable submounts to the 'graveyard' list.
2418 */
2419 static int select_submounts(struct mount *parent, struct list_head *graveyard)
2420 {
2421 struct mount *this_parent = parent;
2422 struct list_head *next;
2423 int found = 0;
2424
2425 repeat:
2426 next = this_parent->mnt_mounts.next;
2427 resume:
2428 while (next != &this_parent->mnt_mounts) {
2429 struct list_head *tmp = next;
2430 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
2431
2432 next = tmp->next;
2433 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
2434 continue;
2435 /*
2436 * Descend a level if the mnt_mounts list is non-empty.
2437 */
2438 if (!list_empty(&mnt->mnt_mounts)) {
2439 this_parent = mnt;
2440 goto repeat;
2441 }
2442
2443 if (!propagate_mount_busy(mnt, 1)) {
2444 list_move_tail(&mnt->mnt_expire, graveyard);
2445 found++;
2446 }
2447 }
2448 /*
2449 * All done at this level ... ascend and resume the search
2450 */
2451 if (this_parent != parent) {
2452 next = this_parent->mnt_child.next;
2453 this_parent = this_parent->mnt_parent;
2454 goto resume;
2455 }
2456 return found;
2457 }
2458
2459 /*
2460 * process a list of expirable mountpoints with the intent of discarding any
2461 * submounts of a specific parent mountpoint
2462 *
2463 * mount_lock must be held for write
2464 */
2465 static void shrink_submounts(struct mount *mnt)
2466 {
2467 LIST_HEAD(graveyard);
2468 struct mount *m;
2469
2470 /* extract submounts of 'mountpoint' from the expiration list */
2471 while (select_submounts(mnt, &graveyard)) {
2472 while (!list_empty(&graveyard)) {
2473 m = list_first_entry(&graveyard, struct mount,
2474 mnt_expire);
2475 touch_mnt_namespace(m->mnt_ns);
2476 umount_tree(m, 1);
2477 }
2478 }
2479 }
2480
2481 /*
2482 * Some copy_from_user() implementations do not return the exact number of
2483 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
2484 * Note that this function differs from copy_from_user() in that it will oops
2485 * on bad values of `to', rather than returning a short copy.
2486 */
2487 static long exact_copy_from_user(void *to, const void __user * from,
2488 unsigned long n)
2489 {
2490 char *t = to;
2491 const char __user *f = from;
2492 char c;
2493
2494 if (!access_ok(VERIFY_READ, from, n))
2495 return n;
2496
2497 while (n) {
2498 if (__get_user(c, f)) {
2499 memset(t, 0, n);
2500 break;
2501 }
2502 *t++ = c;
2503 f++;
2504 n--;
2505 }
2506 return n;
2507 }
2508
2509 int copy_mount_options(const void __user * data, unsigned long *where)
2510 {
2511 int i;
2512 unsigned long page;
2513 unsigned long size;
2514
2515 *where = 0;
2516 if (!data)
2517 return 0;
2518
2519 if (!(page = __get_free_page(GFP_KERNEL)))
2520 return -ENOMEM;
2521
2522 /* We only care that *some* data at the address the user
2523 * gave us is valid. Just in case, we'll zero
2524 * the remainder of the page.
2525 */
2526 /* copy_from_user cannot cross TASK_SIZE ! */
2527 size = TASK_SIZE - (unsigned long)data;
2528 if (size > PAGE_SIZE)
2529 size = PAGE_SIZE;
2530
2531 i = size - exact_copy_from_user((void *)page, data, size);
2532 if (!i) {
2533 free_page(page);
2534 return -EFAULT;
2535 }
2536 if (i != PAGE_SIZE)
2537 memset((char *)page + i, 0, PAGE_SIZE - i);
2538 *where = page;
2539 return 0;
2540 }
2541
2542 char *copy_mount_string(const void __user *data)
2543 {
2544 return data ? strndup_user(data, PAGE_SIZE) : NULL;
2545 }
2546
2547 /*
2548 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2549 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2550 *
2551 * data is a (void *) that can point to any structure up to
2552 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2553 * information (or be NULL).
2554 *
2555 * Pre-0.97 versions of mount() didn't have a flags word.
2556 * When the flags word was introduced its top half was required
2557 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2558 * Therefore, if this magic number is present, it carries no information
2559 * and must be discarded.
2560 */
2561 long do_mount(const char *dev_name, const char __user *dir_name,
2562 const char *type_page, unsigned long flags, void *data_page)
2563 {
2564 struct path path;
2565 int retval = 0;
2566 int mnt_flags = 0;
2567
2568 /* Discard magic */
2569 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2570 flags &= ~MS_MGC_MSK;
2571
2572 /* Basic sanity checks */
2573 if (data_page)
2574 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2575
2576 /* ... and get the mountpoint */
2577 retval = user_path(dir_name, &path);
2578 if (retval)
2579 return retval;
2580
2581 retval = security_sb_mount(dev_name, &path,
2582 type_page, flags, data_page);
2583 if (!retval && !may_mount())
2584 retval = -EPERM;
2585 if (retval)
2586 goto dput_out;
2587
2588 /* Default to relatime unless overridden */
2589 if (!(flags & MS_NOATIME))
2590 mnt_flags |= MNT_RELATIME;
2591
2592 /* Separate the per-mountpoint flags */
2593 if (flags & MS_NOSUID)
2594 mnt_flags |= MNT_NOSUID;
2595 if (flags & MS_NODEV)
2596 mnt_flags |= MNT_NODEV;
2597 if (flags & MS_NOEXEC)
2598 mnt_flags |= MNT_NOEXEC;
2599 if (flags & MS_NOATIME)
2600 mnt_flags |= MNT_NOATIME;
2601 if (flags & MS_NODIRATIME)
2602 mnt_flags |= MNT_NODIRATIME;
2603 if (flags & MS_STRICTATIME)
2604 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2605 if (flags & MS_RDONLY)
2606 mnt_flags |= MNT_READONLY;
2607
2608 /* The default atime for remount is preservation */
2609 if ((flags & MS_REMOUNT) &&
2610 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
2611 MS_STRICTATIME)) == 0)) {
2612 mnt_flags &= ~MNT_ATIME_MASK;
2613 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
2614 }
2615
2616 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2617 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2618 MS_STRICTATIME);
2619
2620 if (flags & MS_REMOUNT)
2621 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2622 data_page);
2623 else if (flags & MS_BIND)
2624 retval = do_loopback(&path, dev_name, flags & MS_REC);
2625 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2626 retval = do_change_type(&path, flags);
2627 else if (flags & MS_MOVE)
2628 retval = do_move_mount(&path, dev_name);
2629 else
2630 retval = do_new_mount(&path, type_page, flags, mnt_flags,
2631 dev_name, data_page);
2632 dput_out:
2633 path_put(&path);
2634 return retval;
2635 }
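/*
 * Editor's note (not part of the original source): MS_MGC_VAL is the
 * old 0xC0ED0000 magic in the top 16 bits of the flags word; the check
 * above simply strips it, so on modern kernels these two calls are
 * equivalent:
 *
 *    mount("none", "/mnt", "tmpfs", MS_MGC_VAL | MS_RDONLY, NULL);
 *    mount("none", "/mnt", "tmpfs", MS_RDONLY, NULL);
 */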
2636
2637 static void free_mnt_ns(struct mnt_namespace *ns)
2638 {
2639 ns_free_inum(&ns->ns);
2640 put_user_ns(ns->user_ns);
2641 kfree(ns);
2642 }
2643
2644 /*
2645 * Assign a sequence number so we can detect when we attempt to bind
2646 * mount a reference to an older mount namespace into the current
2647 * mount namespace, preventing reference counting loops. Even at an
2648 * implausible 10GHz allocation rate, a 64bit counter takes over 58
2649 * years to wrap, which is effectively never in practice.
2650 */
2651 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2652
2653 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2654 {
2655 struct mnt_namespace *new_ns;
2656 int ret;
2657
2658 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2659 if (!new_ns)
2660 return ERR_PTR(-ENOMEM);
2661 ret = ns_alloc_inum(&new_ns->ns);
2662 if (ret) {
2663 kfree(new_ns);
2664 return ERR_PTR(ret);
2665 }
2666 new_ns->ns.ops = &mntns_operations;
2667 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2668 atomic_set(&new_ns->count, 1);
2669 new_ns->root = NULL;
2670 INIT_LIST_HEAD(&new_ns->list);
2671 init_waitqueue_head(&new_ns->poll);
2672 new_ns->event = 0;
2673 new_ns->user_ns = get_user_ns(user_ns);
2674 return new_ns;
2675 }
2676
2677 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2678 struct user_namespace *user_ns, struct fs_struct *new_fs)
2679 {
2680 struct mnt_namespace *new_ns;
2681 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2682 struct mount *p, *q;
2683 struct mount *old;
2684 struct mount *new;
2685 int copy_flags;
2686
2687 BUG_ON(!ns);
2688
2689 if (likely(!(flags & CLONE_NEWNS))) {
2690 get_mnt_ns(ns);
2691 return ns;
2692 }
2693
2694 old = ns->root;
2695
2696 new_ns = alloc_mnt_ns(user_ns);
2697 if (IS_ERR(new_ns))
2698 return new_ns;
2699
2700 namespace_lock();
2701 /* First pass: copy the tree topology */
2702 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2703 if (user_ns != ns->user_ns)
2704 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2705 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2706 if (IS_ERR(new)) {
2707 namespace_unlock();
2708 free_mnt_ns(new_ns);
2709 return ERR_CAST(new);
2710 }
2711 new_ns->root = new;
2712 list_add_tail(&new_ns->list, &new->mnt_list);
2713
2714 /*
2715 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2716 * as belonging to new namespace. We have already acquired a private
2717 * fs_struct, so tsk->fs->lock is not needed.
2718 */
2719 p = old;
2720 q = new;
2721 while (p) {
2722 q->mnt_ns = new_ns;
2723 if (new_fs) {
2724 if (&p->mnt == new_fs->root.mnt) {
2725 new_fs->root.mnt = mntget(&q->mnt);
2726 rootmnt = &p->mnt;
2727 }
2728 if (&p->mnt == new_fs->pwd.mnt) {
2729 new_fs->pwd.mnt = mntget(&q->mnt);
2730 pwdmnt = &p->mnt;
2731 }
2732 }
2733 p = next_mnt(p, old);
2734 q = next_mnt(q, new);
2735 if (!q)
2736 break;
2737 while (p->mnt.mnt_root != q->mnt.mnt_root)
2738 p = next_mnt(p, old);
2739 }
2740 namespace_unlock();
2741
2742 if (rootmnt)
2743 mntput(rootmnt);
2744 if (pwdmnt)
2745 mntput(pwdmnt);
2746
2747 return new_ns;
2748 }
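/*
 * Editor's sketch (not part of the original source): copy_mnt_ns()
 * runs when a task asks for a mount namespace of its own, e.g. via
 * unshare(2):
 *
 *    #define _GNU_SOURCE
 *    #include <sched.h>
 *    #include <stdio.h>
 *    #include <sys/mount.h>
 *
 *    int main(void)
 *    {
 *        if (unshare(CLONE_NEWNS)) {
 *            perror("unshare");
 *            return 1;
 *        }
 *        // mounts made from here on are private to this task;
 *        // making the copied tree private is the usual next step:
 *        mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *        return 0;
 *    }
 *
 * unshare(CLONE_NEWNS) requires CAP_SYS_ADMIN in the owning user
 * namespace (or a fresh user namespace via CLONE_NEWUSER).
 */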
2749
2750 /**
2751 * create_mnt_ns - creates a private namespace and adds a root filesystem
2752 * @mnt: pointer to the new root filesystem mountpoint
2753 */
2754 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2755 {
2756 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2757 if (!IS_ERR(new_ns)) {
2758 struct mount *mnt = real_mount(m);
2759 mnt->mnt_ns = new_ns;
2760 new_ns->root = mnt;
2761 list_add(&mnt->mnt_list, &new_ns->list);
2762 } else {
2763 mntput(m);
2764 }
2765 return new_ns;
2766 }
2767
2768 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2769 {
2770 struct mnt_namespace *ns;
2771 struct super_block *s;
2772 struct path path;
2773 int err;
2774
2775 ns = create_mnt_ns(mnt);
2776 if (IS_ERR(ns))
2777 return ERR_CAST(ns);
2778
2779 err = vfs_path_lookup(mnt->mnt_root, mnt,
2780 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2781
2782 put_mnt_ns(ns);
2783
2784 if (err)
2785 return ERR_PTR(err);
2786
2787 /* trade a vfsmount reference for active sb one */
2788 s = path.mnt->mnt_sb;
2789 atomic_inc(&s->s_active);
2790 mntput(path.mnt);
2791 /* lock the sucker */
2792 down_write(&s->s_umount);
2793 /* ... and return the root of (sub)tree on it */
2794 return path.dentry;
2795 }
2796 EXPORT_SYMBOL(mount_subtree);
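/*
 * Editor's note (not part of the original source): mount_subtree()
 * mounts a tree internally, walks down to 'name', then trades the
 * temporary namespace and vfsmount reference for an active superblock
 * reference, returning the subtree's root dentry with ->s_umount held
 * for write; NFSv4, for instance, uses it to follow an export path
 * inside a referral mount.
 */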
2797
2798 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2799 char __user *, type, unsigned long, flags, void __user *, data)
2800 {
2801 int ret;
2802 char *kernel_type;
2803 char *kernel_dev;
2804 unsigned long data_page;
2805
2806 kernel_type = copy_mount_string(type);
2807 ret = PTR_ERR(kernel_type);
2808 if (IS_ERR(kernel_type))
2809 goto out_type;
2810
2811 kernel_dev = copy_mount_string(dev_name);
2812 ret = PTR_ERR(kernel_dev);
2813 if (IS_ERR(kernel_dev))
2814 goto out_dev;
2815
2816 ret = copy_mount_options(data, &data_page);
2817 if (ret < 0)
2818 goto out_data;
2819
2820 ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
2821 (void *) data_page);
2822
2823 free_page(data_page);
2824 out_data:
2825 kfree(kernel_dev);
2826 out_dev:
2827 kfree(kernel_type);
2828 out_type:
2829 return ret;
2830 }
2831
2832 /*
2833 * Return true if path is reachable from root
2834 *
2835 * namespace_sem or mount_lock is held
2836 */
2837 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
2838 const struct path *root)
2839 {
2840 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
2841 dentry = mnt->mnt_mountpoint;
2842 mnt = mnt->mnt_parent;
2843 }
2844 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
2845 }
2846
2847 int path_is_under(struct path *path1, struct path *path2)
2848 {
2849 int res;
2850 read_seqlock_excl(&mount_lock);
2851 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
2852 read_sequnlock_excl(&mount_lock);
2853 return res;
2854 }
2855 EXPORT_SYMBOL(path_is_under);
2856
2857 /*
2858 * pivot_root Semantics:
2859 * Moves the root file system of the current process to the directory put_old,
2860 * makes new_root as the new root file system of the current process, and sets
2861 * root/cwd of all processes which had them on the current root to new_root.
2862 *
2863 * Restrictions:
2864 * The new_root and put_old must be directories, and must not be on the
2865 * same file system as the current process root. The put_old must be
2866 * underneath new_root, i.e. adding a non-zero number of /.. to the string
2867 * pointed to by put_old must yield the same directory as new_root. No other
2868 * file system may be mounted on put_old. After all, new_root is a mountpoint.
2869 *
2870 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2871 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2872 * in this situation.
2873 *
2874 * Notes:
2875 * - we don't move root/cwd if they are not at the root (reason: if something
2876 * cared enough to change them, it's probably wrong to force them elsewhere)
2877 * - it's okay to pick a root that isn't the root of a file system, e.g.
2878 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2879 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2880 * first.
2881 */
2882 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2883 const char __user *, put_old)
2884 {
2885 struct path new, old, parent_path, root_parent, root;
2886 struct mount *new_mnt, *root_mnt, *old_mnt;
2887 struct mountpoint *old_mp, *root_mp;
2888 int error;
2889
2890 if (!may_mount())
2891 return -EPERM;
2892
2893 error = user_path_dir(new_root, &new);
2894 if (error)
2895 goto out0;
2896
2897 error = user_path_dir(put_old, &old);
2898 if (error)
2899 goto out1;
2900
2901 error = security_sb_pivotroot(&old, &new);
2902 if (error)
2903 goto out2;
2904
2905 get_fs_root(current->fs, &root);
2906 old_mp = lock_mount(&old);
2907 error = PTR_ERR(old_mp);
2908 if (IS_ERR(old_mp))
2909 goto out3;
2910
2911 error = -EINVAL;
2912 new_mnt = real_mount(new.mnt);
2913 root_mnt = real_mount(root.mnt);
2914 old_mnt = real_mount(old.mnt);
2915 if (IS_MNT_SHARED(old_mnt) ||
2916 IS_MNT_SHARED(new_mnt->mnt_parent) ||
2917 IS_MNT_SHARED(root_mnt->mnt_parent))
2918 goto out4;
2919 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
2920 goto out4;
2921 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
2922 goto out4;
2923 error = -ENOENT;
2924 if (d_unlinked(new.dentry))
2925 goto out4;
2926 error = -EBUSY;
2927 if (new_mnt == root_mnt || old_mnt == root_mnt)
2928 goto out4; /* loop, on the same file system */
2929 error = -EINVAL;
2930 if (root.mnt->mnt_root != root.dentry)
2931 goto out4; /* not a mountpoint */
2932 if (!mnt_has_parent(root_mnt))
2933 goto out4; /* not attached */
2934 root_mp = root_mnt->mnt_mp;
2935 if (new.mnt->mnt_root != new.dentry)
2936 goto out4; /* not a mountpoint */
2937 if (!mnt_has_parent(new_mnt))
2938 goto out4; /* not attached */
2939 /* make sure we can reach put_old from new_root */
2940 if (!is_path_reachable(old_mnt, old.dentry, &new))
2941 goto out4;
2942 /* make certain new is below the root */
2943 if (!is_path_reachable(new_mnt, new.dentry, &root))
2944 goto out4;
2945 root_mp->m_count++; /* pin it so it won't go away */
2946 lock_mount_hash();
2947 detach_mnt(new_mnt, &parent_path);
2948 detach_mnt(root_mnt, &root_parent);
2949 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
2950 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
2951 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2952 }
2953 /* mount old root on put_old */
2954 attach_mnt(root_mnt, old_mnt, old_mp);
2955 /* mount new_root on / */
2956 attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
2957 touch_mnt_namespace(current->nsproxy->mnt_ns);
2958 unlock_mount_hash();
2959 chroot_fs_refs(&root, &new);
2960 put_mountpoint(root_mp);
2961 error = 0;
2962 out4:
2963 unlock_mount(old_mp);
2964 if (!error) {
2965 path_put(&root_parent);
2966 path_put(&parent_path);
2967 }
2968 out3:
2969 path_put(&root);
2970 out2:
2971 path_put(&old);
2972 out1:
2973 path_put(&new);
2974 out0:
2975 return error;
2976 }
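/*
 * Editor's sketch (not part of the original source): the canonical
 * pivot_root(2) sequence, as an initramfs or container runtime might
 * issue it. Paths are placeholders; /newroot must itself be a mount
 * point (hence the self bind mount) and /newroot/oldroot must exist:
 *
 *    #define _GNU_SOURCE
 *    #include <sys/mount.h>
 *    #include <sys/syscall.h>
 *    #include <unistd.h>
 *
 *    mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *    syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot");
 *    chdir("/");
 *    umount2("/oldroot", MNT_DETACH);
 *
 * glibc provides no pivot_root() wrapper, hence the raw syscall.
 */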
2977
2978 static void __init init_mount_tree(void)
2979 {
2980 struct vfsmount *mnt;
2981 struct mnt_namespace *ns;
2982 struct path root;
2983 struct file_system_type *type;
2984
2985 type = get_fs_type("rootfs");
2986 if (!type)
2987 panic("Can't find rootfs type");
2988 mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
2989 put_filesystem(type);
2990 if (IS_ERR(mnt))
2991 panic("Can't create rootfs");
2992
2993 ns = create_mnt_ns(mnt);
2994 if (IS_ERR(ns))
2995 panic("Can't allocate initial namespace");
2996
2997 init_task.nsproxy->mnt_ns = ns;
2998 get_mnt_ns(ns);
2999
3000 root.mnt = mnt;
3001 root.dentry = mnt->mnt_root;
3002
3003 set_fs_pwd(current->fs, &root);
3004 set_fs_root(current->fs, &root);
3005 }
3006
3007 void __init mnt_init(void)
3008 {
3009 unsigned u;
3010 int err;
3011
3012 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
3013 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3014
3015 mount_hashtable = alloc_large_system_hash("Mount-cache",
3016 sizeof(struct hlist_head),
3017 mhash_entries, 19,
3018 0,
3019 &m_hash_shift, &m_hash_mask, 0, 0);
3020 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
3021 sizeof(struct hlist_head),
3022 mphash_entries, 19,
3023 0,
3024 &mp_hash_shift, &mp_hash_mask, 0, 0);
3025
3026 if (!mount_hashtable || !mountpoint_hashtable)
3027 panic("Failed to allocate mount hash table\n");
3028
3029 for (u = 0; u <= m_hash_mask; u++)
3030 INIT_HLIST_HEAD(&mount_hashtable[u]);
3031 for (u = 0; u <= mp_hash_mask; u++)
3032 INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
3033
3034 kernfs_init();
3035
3036 err = sysfs_init();
3037 if (err)
3038 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
3039 __func__, err);
3040 fs_kobj = kobject_create_and_add("fs", NULL);
3041 if (!fs_kobj)
3042 printk(KERN_WARNING "%s: kobj create error\n", __func__);
3043 init_rootfs();
3044 init_mount_tree();
3045 }
3046
3047 void put_mnt_ns(struct mnt_namespace *ns)
3048 {
3049 if (!atomic_dec_and_test(&ns->count))
3050 return;
3051 drop_collected_mounts(&ns->root->mnt);
3052 free_mnt_ns(ns);
3053 }
3054
3055 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
3056 {
3057 struct vfsmount *mnt;
3058 mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
3059 if (!IS_ERR(mnt)) {
3060 /*
3061 * it is a longterm mount; don't release mnt until
3062 * we unmount it, just before the filesystem is unregistered
3063 */
3064 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
3065 }
3066 return mnt;
3067 }
3068 EXPORT_SYMBOL_GPL(kern_mount_data);
3069
3070 void kern_unmount(struct vfsmount *mnt)
3071 {
3072 /* release long term mount so mount point can be released */
3073 if (!IS_ERR_OR_NULL(mnt)) {
3074 real_mount(mnt)->mnt_ns = NULL;
3075 synchronize_rcu(); /* yecchhh... */
3076 mntput(mnt);
3077 }
3078 }
3079 EXPORT_SYMBOL(kern_unmount);
3080
3081 bool our_mnt(struct vfsmount *mnt)
3082 {
3083 return check_mnt(real_mount(mnt));
3084 }
3085
3086 bool current_chrooted(void)
3087 {
3088 /* Does the current process have a non-standard root */
3089 struct path ns_root;
3090 struct path fs_root;
3091 bool chrooted;
3092
3093 /* Find the namespace root */
3094 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
3095 ns_root.dentry = ns_root.mnt->mnt_root;
3096 path_get(&ns_root);
3097 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
3098 ;
3099
3100 get_fs_root(current->fs, &fs_root);
3101
3102 chrooted = !path_equal(&fs_root, &ns_root);
3103
3104 path_put(&fs_root);
3105 path_put(&ns_root);
3106
3107 return chrooted;
3108 }
3109
3110 bool fs_fully_visible(struct file_system_type *type)
3111 {
3112 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
3113 struct mount *mnt;
3114 bool visible = false;
3115
3116 if (unlikely(!ns))
3117 return false;
3118
3119 down_read(&namespace_sem);
3120 list_for_each_entry(mnt, &ns->list, mnt_list) {
3121 struct mount *child;
3122 if (mnt->mnt.mnt_sb->s_type != type)
3123 continue;
3124
3125 /* This mount is not fully visible if there are any child mounts
3126 * that cover anything except for empty directories.
3127 */
3128 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3129 struct inode *inode = child->mnt_mountpoint->d_inode;
3130 if (!S_ISDIR(inode->i_mode))
3131 goto next;
3132 if (inode->i_nlink > 2)
3133 goto next;
3134 }
3135 visible = true;
3136 goto found;
3137 next: ;
3138 }
3139 found:
3140 up_read(&namespace_sem);
3141 return visible;
3142 }
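/*
 * Editor's note (not part of the original source): the i_nlink > 2
 * test leans on classic directory link counts ("." plus the entry in
 * the parent plus one per subdirectory), so it only proves that a
 * covered directory has subdirectories; it approximates "empty" rather
 * than checking it exactly.
 */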
3143
3144 static struct ns_common *mntns_get(struct task_struct *task)
3145 {
3146 struct ns_common *ns = NULL;
3147 struct nsproxy *nsproxy;
3148
3149 task_lock(task);
3150 nsproxy = task->nsproxy;
3151 if (nsproxy) {
3152 ns = &nsproxy->mnt_ns->ns;
3153 get_mnt_ns(to_mnt_ns(ns));
3154 }
3155 task_unlock(task);
3156
3157 return ns;
3158 }
3159
3160 static void mntns_put(struct ns_common *ns)
3161 {
3162 put_mnt_ns(to_mnt_ns(ns));
3163 }
3164
3165 static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
3166 {
3167 struct fs_struct *fs = current->fs;
3168 struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
3169 struct path root;
3170
3171 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
3172 !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
3173 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
3174 return -EPERM;
3175
3176 if (fs->users != 1)
3177 return -EINVAL;
3178
3179 get_mnt_ns(mnt_ns);
3180 put_mnt_ns(nsproxy->mnt_ns);
3181 nsproxy->mnt_ns = mnt_ns;
3182
3183 /* Find the root */
3184 root.mnt = &mnt_ns->root->mnt;
3185 root.dentry = mnt_ns->root->mnt.mnt_root;
3186 path_get(&root);
3187 while (d_mountpoint(root.dentry) && follow_down_one(&root))
3188 ;
3189
3190 /* Update the pwd and root */
3191 set_fs_pwd(fs, &root);
3192 set_fs_root(fs, &root);
3193
3194 path_put(&root);
3195 return 0;
3196 }
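/*
 * Editor's sketch (not part of the original source): mntns_install()
 * is the kernel side of setns(2) on a mount-namespace fd. With a
 * placeholder pid, and given the capability checks above:
 *
 *    #define _GNU_SOURCE
 *    #include <fcntl.h>
 *    #include <sched.h>
 *
 *    int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *    if (fd >= 0 && setns(fd, CLONE_NEWNS) == 0) {
 *        // now operating in the target's mount namespace
 *    }
 *
 * Note the fs->users check above: a process sharing its fs_struct
 * (e.g. a multithreaded one) cannot switch mount namespaces.
 */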
3197
3198 const struct proc_ns_operations mntns_operations = {
3199 .name = "mnt",
3200 .type = CLONE_NEWNS,
3201 .get = mntns_get,
3202 .put = mntns_put,
3203 .install = mntns_install,
3204 };