/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/* allocation is serialized by namespace_sem */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&vfsmount_lock);
	res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		atomic_set(&mnt->__mnt_writers, 0);
	}
	return mnt;

out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*.  This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

struct mnt_writer {
	/*
	 * If holding multiple instances of this lock, they
	 * must be ordered by cpu number.
	 */
	spinlock_t lock;
	struct lock_class_key lock_class; /* compiles out with !lockdep */
	unsigned long count;
	struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);

static int __init init_mnt_writers(void)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
		spin_lock_init(&writer->lock);
		lockdep_set_class(&writer->lock, &writer->lock_class);
		writer->count = 0;
	}
	return 0;
}
fs_initcall(init_mnt_writers);

static void unlock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_unlock(&cpu_writer->lock);
	}
}

static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
{
	if (!cpu_writer->mnt)
		return;
	/*
	 * This is in case anyone ever leaves an invalid,
	 * old ->mnt and a count of 0.
	 */
	if (!cpu_writer->count)
		return;
	atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
	cpu_writer->count = 0;
}
/*
 * must hold cpu_writer->lock
 */
static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
					    struct vfsmount *mnt)
{
	if (cpu_writer->mnt == mnt)
		return;
	__clear_mnt_count(cpu_writer);
	cpu_writer->mnt = mnt;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success.  When
 * the write operation is finished, mnt_drop_write()
 * must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);
	if (__mnt_is_readonly(mnt)) {
		ret = -EROFS;
		goto out;
	}
	use_cpu_writer_for_mount(cpu_writer, mnt);
	cpu_writer->count++;
out:
	spin_unlock(&cpu_writer->lock);
	put_cpu_var(mnt_writers);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

static void lock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_lock(&cpu_writer->lock);
		__clear_mnt_count(cpu_writer);
		cpu_writer->mnt = NULL;
	}
}

/*
 * These per-cpu write counts are not guaranteed to have
 * matched increments and decrements on any given cpu.
 * A file open()ed for write on one cpu and close()d on
 * another cpu will imbalance this count.  Make sure it
 * does not get too far out of whack.
 */
static void handle_write_count_underflow(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->__mnt_writers) >=
	    MNT_WRITER_UNDERFLOW_LIMIT)
		return;
	/*
	 * It isn't necessary to hold all of the locks
	 * at the same time, but doing it this way makes
	 * us share a lot more code.
	 */
	lock_mnt_writers();
	/*
	 * vfsmount_lock is for mnt_flags.
	 */
	spin_lock(&vfsmount_lock);
	/*
	 * If coalescing the per-cpu writer counts did not
	 * get us back to a positive writer count, we have
	 * a bug.
	 */
	if ((atomic_read(&mnt->__mnt_writers) < 0) &&
	    !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
		WARN(1, KERN_DEBUG "leak detected on mount(%p) writers "
				"count: %d\n",
			mnt, atomic_read(&mnt->__mnt_writers));
		/* use the flag to keep the dmesg spam down */
		mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
	}
	spin_unlock(&vfsmount_lock);
	unlock_mnt_writers();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	int must_check_underflow = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);

	use_cpu_writer_for_mount(cpu_writer, mnt);
	if (cpu_writer->count > 0) {
		cpu_writer->count--;
	} else {
		must_check_underflow = 1;
		atomic_dec(&mnt->__mnt_writers);
	}

	spin_unlock(&cpu_writer->lock);
	/*
	 * Logically, we could call this each time,
	 * but the __mnt_writers cacheline tends to
	 * be cold, and makes this expensive.
	 */
	if (must_check_underflow)
		handle_write_count_underflow(mnt);
	/*
	 * This could be done right after the spinlock
	 * is taken because the spinlock keeps us on
	 * the cpu, and disables preemption.  However,
	 * putting it here bounds the amount that
	 * __mnt_writers can underflow.  Without it,
	 * we could theoretically wrap __mnt_writers.
	 */
	put_cpu_var(mnt_writers);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
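
/*
 * Illustrative sketch (not part of this file): how a typical caller is
 * expected to bracket a write with the pair above.  The helper name and
 * call site are hypothetical; real users sit in VFS paths such as the
 * unlink/create code.
 */
#if 0
static int example_unlink(struct vfsmount *mnt, struct inode *dir,
			  struct dentry *dentry)
{
	int err;

	err = mnt_want_write(mnt);	/* -EROFS if the mount is r/o */
	if (err)
		return err;
	err = vfs_unlink(dir, dentry);
	mnt_drop_write(mnt);		/* always pair with the want above */
	return err;
}
#endif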

static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	lock_mnt_writers();
	/*
	 * With all the locks held, this value is stable
	 */
	if (atomic_read(&mnt->__mnt_writers) > 0) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * nobody can do a successful mnt_want_write() with all
	 * of the counts in MNT_DENIED_WRITE and the locks held.
	 */
	spin_lock(&vfsmount_lock);
	if (!ret)
		mnt->mnt_flags |= MNT_READONLY;
	spin_unlock(&vfsmount_lock);
out:
	unlock_mnt_writers();
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}

int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
	return 0;
}

EXPORT_SYMBOL(simple_set_mnt);

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
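
/*
 * Illustrative sketch (not part of this file): walking down a stack of
 * mounts with lookup_mnt(), in the style of the path-walking code.  The
 * local variable names are hypothetical.
 */
#if 0
	while (d_mountpoint(path->dentry)) {
		struct vfsmount *mounted = lookup_mnt(path->mnt, path->dentry);
		if (!mounted)
			break;
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
#endif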

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}

void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
				  int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}

static inline void __mntput(struct vfsmount *mnt)
{
	int cpu;
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * We don't have to hold all of the locks at the
	 * same time here because we know that we're the
	 * last reference to mnt and that no new writers
	 * can come in.
	 */
	for_each_possible_cpu(cpu) {
		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
		if (cpu_writer->mnt != mnt)
			continue;
		spin_lock(&cpu_writer->lock);
		atomic_add(cpu_writer->count, &mnt->__mnt_writers);
		cpu_writer->count = 0;
		/*
		 * Might as well do this so that no one
		 * ever sees the pointer and expects
		 * it to be valid.
		 */
		cpu_writer->mnt = NULL;
		spin_unlock(&cpu_writer->lock);
	}
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	WARN_ON(atomic_read(&mnt->__mnt_writers));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);

void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options = mnt->mnt_sb->s_options;

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	kfree(sb->s_options);
	sb->s_options = kstrdup(options, GFP_KERNEL);
}
EXPORT_SYMBOL(save_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

struct proc_fs_info {
	int flag;
	const char *str;
};

static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}

static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}

static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
out:
	return err;
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	seq_dentry(m, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
		/*
		 * Mountpoint is outside root, discard that one.  Ugly,
		 * but less so than trying to do that in iterator in a
		 * race-free way (due to renames).
		 */
		return SEQ_SKIP;
	}
	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}

const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};

static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
#endif /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts.  If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);

void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		lock_kernel();
		sb->s_op->umount_begin(sb);
		unlock_kernel();
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct path path;
	int retval;

	retval = user_path(name, &path);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(path.mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->path.dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
			   int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *		 store the parent mount and mountpoint dentry.
 *		 (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *	 tree of the destination mount and the cloned mount is added to
 *	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *	 all the mounts belonging to the destination mount's propagation tree.
 *	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(current->nsproxy->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}
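
/*
 * Illustrative userspace sketch (not part of this file): exercising the
 * bind-mount table above from mount(2); the paths are hypothetical.
 * Once /mnt is marked shared, a bind mounted under it is propagated to
 * all of /mnt's peers and the resulting mounts are shared among
 * themselves.
 */
#if 0
	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);	/* destination: shared */
	mount("/src", "/mnt/a", NULL, MS_BIND, NULL);	/* "shared" row of the table */
#endif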

static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->path.mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_loopback(struct nameidata *nd, char *old_name,
				int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
		goto out;

	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
	else
		mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, &nd->path);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_nd.path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
			       void *data)
{
	int err;
	struct super_block *sb = nd->path.mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->path.mnt))
		return -EINVAL;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(nd->path.mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->path.mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->path.mnt, flags, data);
	return err;
}

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/*
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd;
	struct path parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->path.dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->path.dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
		goto out1;

	if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->path.mnt) &&
	    tree_contains_unbindable(old_nd.path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer expire
	 * automatically */
	list_del_init(&old_nd.path.mnt->mnt_expire);
out1:
	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_nd.path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
				 int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, &nd->path, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct path *path,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(path->dentry) &&
	       follow_down(&path->mnt, &path->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(path->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, path)))
		goto unlock;

	if (fslist) /* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
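
/*
 * Illustrative sketch (not part of this file): a filesystem grafting an
 * automounted submount and parking it on a private expiration list, in
 * the style of NFS submounts.  The list and helper names are
 * hypothetical.  Note that do_add_mount() consumes @newmnt on failure,
 * so the caller must not mntput() it again.
 */
#if 0
static LIST_HEAD(example_automount_list);

static int example_graft_submount(struct vfsmount *newmnt, struct path *path)
{
	return do_add_mount(newmnt, path, MNT_SHRINKABLE,
			    &example_automount_list);
}
#endif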

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
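
/*
 * Illustrative sketch (not part of this file): pairing an expiration list
 * with a periodic worker, the way automounting filesystems expire unused
 * submounts.  Names are hypothetical.  A mount is only unmounted once it
 * stays unused across two consecutive runs, since the first run merely
 * sets mnt_expiry_mark and mntput() clears it on use.
 */
#if 0
static void example_expire_automounts(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_automount_work, example_expire_automounts);

static void example_expire_automounts(struct work_struct *work)
{
	mark_mounts_for_expiry(&example_automount_list);
	if (!list_empty(&example_automount_list))
		schedule_delayed_work(&example_automount_work, 30 * HZ);
}
#endif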

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(mnt->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_RELATIME)
		mnt_flags |= MNT_RELATIME;
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd.path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&nd.path);
	return retval;
}
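
/*
 * Illustrative userspace sketch (not part of this file): the call that
 * ends up here via sys_mount().  The mount point and option string are
 * hypothetical; "size=64m,mode=755" travels in data_page and is parsed
 * by the filesystem, while MS_NOSUID is split off into MNT_NOSUID above.
 */
#if 0
	mount("none", "/tmp/t", "tmpfs", MS_NOSUID, "size=64m,mode=755");
#endif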

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
1996 return ERR_PTR(-ENOMEM);
1997 }
1998 spin_lock(&vfsmount_lock);
1999 list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
2000 spin_unlock(&vfsmount_lock);
2001
2002 /*
2003 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2004 * as belonging to the new namespace. We have already acquired a private
2005 * fs_struct, so tsk->fs->lock is not needed.
2006 */
2007 p = mnt_ns->root;
2008 q = new_ns->root;
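	/*
	 * copy_tree() built an isomorphic copy of the original tree, so
	 * walking both trees with next_mnt() in lockstep visits pairs of
	 * corresponding mounts.
	 */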
2009 while (p) {
2010 q->mnt_ns = new_ns;
2011 if (fs) {
2012 if (p == fs->root.mnt) {
2013 rootmnt = p;
2014 fs->root.mnt = mntget(q);
2015 }
2016 if (p == fs->pwd.mnt) {
2017 pwdmnt = p;
2018 fs->pwd.mnt = mntget(q);
2019 }
2020 }
2021 p = next_mnt(p, mnt_ns->root);
2022 q = next_mnt(q, new_ns->root);
2023 }
2024 up_write(&namespace_sem);
2025
2026 if (rootmnt)
2027 mntput(rootmnt);
2028 if (pwdmnt)
2029 mntput(pwdmnt);
2030
2031 return new_ns;
2032 }
2033
2034 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2035 struct fs_struct *new_fs)
2036 {
2037 struct mnt_namespace *new_ns;
2038
2039 BUG_ON(!ns);
2040 get_mnt_ns(ns);
2041
2042 if (!(flags & CLONE_NEWNS))
2043 return ns;
2044
2045 new_ns = dup_mnt_ns(ns, new_fs);
2046
2047 put_mnt_ns(ns);
2048 return new_ns;
2049 }
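
/*
 * Illustrative userspace triggers (sketch): both calls below reach
 * copy_mnt_ns() with CLONE_NEWNS set and therefore clone the mount tree
 * via dup_mnt_ns() above.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	unshare(CLONE_NEWNS);			// private ns for the caller
 *	clone(child_fn, stack_top,		// or a child in its own ns
 *	      CLONE_NEWNS | SIGCHLD, NULL);
 */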
2050
2051 asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
2052 char __user * type, unsigned long flags,
2053 void __user * data)
2054 {
2055 int retval;
2056 unsigned long data_page;
2057 unsigned long type_page;
2058 unsigned long dev_page;
2059 char *dir_page;
2060
2061 retval = copy_mount_options(type, &type_page);
2062 if (retval < 0)
2063 return retval;
2064
2065 dir_page = getname(dir_name);
2066 retval = PTR_ERR(dir_page);
2067 if (IS_ERR(dir_page))
2068 goto out1;
2069
2070 retval = copy_mount_options(dev_name, &dev_page);
2071 if (retval < 0)
2072 goto out2;
2073
2074 retval = copy_mount_options(data, &data_page);
2075 if (retval < 0)
2076 goto out3;
2077
2078 lock_kernel();
2079 retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
2080 flags, (void *)data_page);
2081 unlock_kernel();
2082 free_page(data_page);
2083
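/* Success and failure both unwind here, in reverse allocation order. */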
2084 out3:
2085 free_page(dev_page);
2086 out2:
2087 putname(dir_page);
2088 out1:
2089 free_page(type_page);
2090 return retval;
2091 }
2092
2093 /*
2094 * Replace fs->root with *path and put the old root.
2095 * May block, so callers must be able to sleep.
2096 */
2097 void set_fs_root(struct fs_struct *fs, struct path *path)
2098 {
2099 struct path old_root;
2100
2101 write_lock(&fs->lock);
2102 old_root = fs->root;
2103 fs->root = *path;
2104 path_get(path);
2105 write_unlock(&fs->lock);
2106 if (old_root.dentry)
2107 path_put(&old_root);
2108 }
2109
2110 /*
2111 * Replace fs->pwd with *path and put the old pwd.
2112 * May block, so callers must be able to sleep.
2113 */
2114 void set_fs_pwd(struct fs_struct *fs, struct path *path)
2115 {
2116 struct path old_pwd;
2117
2118 write_lock(&fs->lock);
2119 old_pwd = fs->pwd;
2120 fs->pwd = *path;
2121 path_get(path);
2122 write_unlock(&fs->lock);
2123
2124 if (old_pwd.dentry)
2125 path_put(&old_pwd);
2126 }
2127
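/*
 * Retarget the root and cwd of every task that still points at old_root
 * to new_root. The fs_struct refcount is raised under task_lock() so it
 * cannot go away once we drop the lock to call the blocking
 * set_fs_root()/set_fs_pwd() helpers.
 */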
2128 static void chroot_fs_refs(struct path *old_root, struct path *new_root)
2129 {
2130 struct task_struct *g, *p;
2131 struct fs_struct *fs;
2132
2133 read_lock(&tasklist_lock);
2134 do_each_thread(g, p) {
2135 task_lock(p);
2136 fs = p->fs;
2137 if (fs) {
2138 atomic_inc(&fs->count);
2139 task_unlock(p);
2140 if (fs->root.dentry == old_root->dentry
2141 && fs->root.mnt == old_root->mnt)
2142 set_fs_root(fs, new_root);
2143 if (fs->pwd.dentry == old_root->dentry
2144 && fs->pwd.mnt == old_root->mnt)
2145 set_fs_pwd(fs, new_root);
2146 put_fs_struct(fs);
2147 } else
2148 task_unlock(p);
2149 } while_each_thread(g, p);
2150 read_unlock(&tasklist_lock);
2151 }
2152
2153 /*
2154 * pivot_root Semantics:
2155 * Moves the root file system of the current process to the directory put_old,
2156 * makes new_root the new root file system of the current process, and sets
2157 * root/cwd of all processes which had them on the current root to new_root.
2158 *
2159 * Restrictions:
2160 * new_root and put_old must be directories, and must not be on the
2161 * same file system as the current process root. put_old must be
2162 * underneath new_root, i.e. adding a non-zero number of /.. to the string
2163 * pointed to by put_old must yield the same directory as new_root. No other
2164 * file system may be mounted on put_old. After all, new_root is a mountpoint.
2165 *
2166 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2167 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2168 * in this situation.
2169 *
2170 * Notes:
2171 * - we don't move root/cwd if they are not at the root (reason: if something
2172 * cared enough to change them, it's probably wrong to force them elsewhere)
2173 * - it's okay to pick a root that isn't the root of a file system, e.g.
2174 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2175 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2176 * first.
2177 */
2178 asmlinkage long sys_pivot_root(const char __user * new_root,
2179 const char __user * put_old)
2180 {
2181 struct vfsmount *tmp;
2182 struct path new, old, parent_path, root_parent, root;
2183 int error;
2184
2185 if (!capable(CAP_SYS_ADMIN))
2186 return -EPERM;
2187
2188 error = user_path_dir(new_root, &new);
2189 if (error)
2190 goto out0;
2191 error = -EINVAL;
2192 if (!check_mnt(new.mnt))
2193 goto out1;
2194
2195 error = user_path_dir(put_old, &old);
2196 if (error)
2197 goto out1;
2198
2199 error = security_sb_pivotroot(&old, &new);
2200 if (error) {
2201 path_put(&old);
2202 goto out1;
2203 }
2204
2205 read_lock(&current->fs->lock);
2206 root = current->fs->root;
2207 path_get(&current->fs->root);
2208 read_unlock(&current->fs->lock);
2209 down_write(&namespace_sem);
2210 mutex_lock(&old.dentry->d_inode->i_mutex);
2211 error = -EINVAL;
2212 if (IS_MNT_SHARED(old.mnt) ||
2213 IS_MNT_SHARED(new.mnt->mnt_parent) ||
2214 IS_MNT_SHARED(root.mnt->mnt_parent))
2215 goto out2;
2216 if (!check_mnt(root.mnt))
2217 goto out2;
2218 error = -ENOENT;
2219 if (IS_DEADDIR(new.dentry->d_inode))
2220 goto out2;
2221 if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry))
2222 goto out2;
2223 if (d_unhashed(old.dentry) && !IS_ROOT(old.dentry))
2224 goto out2;
2225 error = -EBUSY;
2226 if (new.mnt == root.mnt ||
2227 old.mnt == root.mnt)
2228 goto out2; /* loop, on the same file system */
2229 error = -EINVAL;
2230 if (root.mnt->mnt_root != root.dentry)
2231 goto out2; /* not a mountpoint */
2232 if (root.mnt->mnt_parent == root.mnt)
2233 goto out2; /* not attached */
2234 if (new.mnt->mnt_root != new.dentry)
2235 goto out2; /* not a mountpoint */
2236 if (new.mnt->mnt_parent == new.mnt)
2237 goto out2; /* not attached */
2238 /* make sure we can reach put_old from new_root */
2239 tmp = old.mnt;
2240 spin_lock(&vfsmount_lock);
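	/*
	 * Walk up the mnt_parent chain from put_old: we must reach new.mnt
	 * before hitting a self-parented mount, and the mountpoint crossed
	 * on the way must lie below new.dentry.
	 */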
2241 if (tmp != new.mnt) {
2242 for (;;) {
2243 if (tmp->mnt_parent == tmp)
2244 goto out3; /* already mounted on put_old */
2245 if (tmp->mnt_parent == new.mnt)
2246 break;
2247 tmp = tmp->mnt_parent;
2248 }
2249 if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
2250 goto out3;
2251 } else if (!is_subdir(old.dentry, new.dentry))
2252 goto out3;
2253 detach_mnt(new.mnt, &parent_path);
2254 detach_mnt(root.mnt, &root_parent);
2255 /* mount old root on put_old */
2256 attach_mnt(root.mnt, &old);
2257 /* mount new_root on / */
2258 attach_mnt(new.mnt, &root_parent);
2259 touch_mnt_namespace(current->nsproxy->mnt_ns);
2260 spin_unlock(&vfsmount_lock);
2261 chroot_fs_refs(&root, &new);
2262 security_sb_post_pivotroot(&root, &new);
2263 error = 0;
2264 path_put(&root_parent);
2265 path_put(&parent_path);
2266 out2:
2267 mutex_unlock(&old.dentry->d_inode->i_mutex);
2268 up_write(&namespace_sem);
2269 path_put(&root);
2270 path_put(&old);
2271 out1:
2272 path_put(&new);
2273 out0:
2274 return error;
2275 out3:
2276 spin_unlock(&vfsmount_lock);
2277 goto out2;
2278 }
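
/*
 * Illustrative userspace sequence (sketch; pivot_root(8) from util-linux
 * wraps the same syscall, which has no glibc wrapper):
 *
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	mount("/dev/sda2", "/newroot", "ext3", 0, NULL);
 *	mkdir("/newroot/old", 0700);
 *	syscall(SYS_pivot_root, "/newroot", "/newroot/old");
 *	chdir("/");
 *	umount2("/old", MNT_DETACH);	// detach the old root when done
 */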
2279
2280 static void __init init_mount_tree(void)
2281 {
2282 struct vfsmount *mnt;
2283 struct mnt_namespace *ns;
2284 struct path root;
2285
2286 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2287 if (IS_ERR(mnt))
2288 panic("Can't create rootfs");
2289 ns = kmalloc(sizeof(*ns), GFP_KERNEL);
2290 if (!ns)
2291 panic("Can't allocate initial namespace");
2292 atomic_set(&ns->count, 1);
2293 INIT_LIST_HEAD(&ns->list);
2294 init_waitqueue_head(&ns->poll);
2295 ns->event = 0;
2296 list_add(&mnt->mnt_list, &ns->list);
2297 ns->root = mnt;
2298 mnt->mnt_ns = ns;
2299
2300 init_task.nsproxy->mnt_ns = ns;
2301 get_mnt_ns(ns);
2302
2303 root.mnt = ns->root;
2304 root.dentry = ns->root->mnt_root;
2305
2306 set_fs_pwd(current->fs, &root);
2307 set_fs_root(current->fs, &root);
2308 }
2309
2310 void __init mnt_init(void)
2311 {
2312 unsigned u;
2313 int err;
2314
2315 init_rwsem(&namespace_sem);
2316
2317 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
2318 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2319
2320 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
2321
2322 if (!mount_hashtable)
2323 panic("Failed to allocate mount hash table\n");
2324
2325 printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
2326
2327 for (u = 0; u < HASH_SIZE; u++)
2328 INIT_LIST_HEAD(&mount_hashtable[u]);
2329
2330 err = sysfs_init();
2331 if (err)
2332 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
2333 __func__, err);
2334 fs_kobj = kobject_create_and_add("fs", NULL);
2335 if (!fs_kobj)
2336 printk(KERN_WARNING "%s: kobj create error\n", __func__);
2337 init_rootfs();
2338 init_mount_tree();
2339 }
2340
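/*
 * Called from put_mnt_ns() with vfsmount_lock held once the last
 * reference is gone. Drop the spinlock so we can sleep on namespace_sem,
 * then retake it around the actual teardown of the tree.
 */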
2341 void __put_mnt_ns(struct mnt_namespace *ns)
2342 {
2343 struct vfsmount *root = ns->root;
2344 LIST_HEAD(umount_list);
2345 ns->root = NULL;
2346 spin_unlock(&vfsmount_lock);
2347 down_write(&namespace_sem);
2348 spin_lock(&vfsmount_lock);
2349 umount_tree(root, 0, &umount_list);
2350 spin_unlock(&vfsmount_lock);
2351 up_write(&namespace_sem);
2352 release_mounts(&umount_list);
2353 kfree(ns);
2354 }