/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct super_block *sb;
        long    fs_objects = 0;
        long    total_objects;
        long    freed = 0;
        long    dentries;
        long    inodes;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance. We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends.
         */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!grab_super_passive(sb))
                return SHRINK_STOP;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);

        inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
        dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects = dentries + inodes + fs_objects + 1;
        if (!total_objects)
                total_objects = 1;

        /* proportion the scan between the caches */
        dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
        fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

        /*
         * prune the dcache first as the icache is pinned by it, then
         * prune the icache, followed by the filesystem specific caches
         */
        sc->nr_to_scan = dentries;
        freed = prune_dcache_sb(sb, sc);
        sc->nr_to_scan = inodes;
        freed += prune_icache_sb(sb, sc);

        if (fs_objects)
                freed += sb->s_op->free_cached_objects(sb, fs_objects,
                                                       sc->nid);

        drop_super(sb);
        return freed;
}
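
/*
 * Worked example of the proportional split above (illustrative numbers,
 * not taken from a real workload): with sc->nr_to_scan = 128 and counts
 * dentries = 600, inodes = 300, fs_objects = 100, total_objects comes to
 * 1001 and mult_frac() yields 76 dentry, 38 inode and 12 fs-specific
 * scan targets, so each cache is shrunk roughly in proportion to its
 * share of the total object population.
 */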

static unsigned long super_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct super_block *sb;
        long    total_objects = 0;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Don't call grab_super_passive as it is a potential
         * scalability bottleneck. The counts could get updated
         * between super_cache_count and super_cache_scan anyway.
         * Call to super_cache_count with shrinker_rwsem held
         * ensures the safety of call to list_lru_shrink_count() and
         * s_op->nr_cached_objects().
         */
        if (sb->s_op && sb->s_op->nr_cached_objects)
                total_objects = sb->s_op->nr_cached_objects(sb,
                                                            sc->nid);

        total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

        total_objects = vfs_pressure_ratio(total_objects);
        return total_objects;
}

/**
 *      destroy_super   -       frees a superblock
 *      @s: superblock to free
 *
 *      Frees a superblock.
 */
static void destroy_super(struct super_block *s)
{
        int i;
        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_counter_destroy(&s->s_writers.counter[i]);
        security_sb_free(s);
        WARN_ON(!list_empty(&s->s_mounts));
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree_rcu(s, rcu);
}

/**
 *      alloc_super     -       create new superblock
 *      @type:  filesystem type superblock should belong to
 *      @flags: the mount flags
 *
 *      Allocates and initializes a new &struct super_block.  alloc_super()
 *      returns a pointer to a new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;
        int i;

        if (!s)
                return NULL;

        INIT_LIST_HEAD(&s->s_mounts);

        if (security_sb_alloc(s))
                goto fail;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                if (percpu_counter_init(&s->s_writers.counter[i], 0,
                                        GFP_KERNEL) < 0)
                        goto fail;
                lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
                                 &type->s_writers_key[i], 0);
        }
        init_waitqueue_head(&s->s_writers.wait);
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        s->s_flags = flags;
        s->s_bdi = &default_backing_dev_info;
        INIT_HLIST_NODE(&s->s_instances);
        INIT_HLIST_BL_HEAD(&s->s_anon);
        INIT_LIST_HEAD(&s->s_inodes);

        if (list_lru_init(&s->s_dentry_lru))
                goto fail;
        if (list_lru_init(&s->s_inode_lru))
                goto fail;

        init_rwsem(&s->s_umount);
        lockdep_set_class(&s->s_umount, &type->s_umount_key);
        /*
         * sget() can have s_umount recursion.
         *
         * When it cannot find a suitable sb, it allocates a new
         * one (this one), and tries again to find a suitable old
         * one.
         *
         * In case that succeeds, it will acquire the s_umount
         * lock of the old one. Since these are clearly distinct
         * locks, and this object isn't exposed yet, there's no
         * risk of deadlocks.
         *
         * Annotate this by putting this lock in a different
         * subclass.
         */
        down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        mutex_init(&s->s_dquot.dqio_mutex);
        mutex_init(&s->s_dquot.dqonoff_mutex);
        s->s_maxbytes = MAX_NON_LFS;
        s->s_op = &default_op;
        s->s_time_gran = 1000000000;
        s->cleancache_poolid = -1;

        s->s_shrink.seeks = DEFAULT_SEEKS;
        s->s_shrink.scan_objects = super_cache_scan;
        s->s_shrink.count_objects = super_cache_count;
        s->s_shrink.batch = 1024;
        s->s_shrink.flags = SHRINKER_NUMA_AWARE;
        return s;

fail:
        destroy_super(s);
        return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 *      put_super       -       drop a temporary reference to superblock
 *      @sb: superblock in question
 *
 *      Drops a temporary reference, frees the superblock if there are no
 *      references left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}


/**
 *      deactivate_locked_super -       drop an active reference to superblock
 *      @s: superblock to deactivate
 *
 *      Drops an active reference to superblock, converting it into a temporary
 *      one if there are no other active references left.  In that case we
 *      tell the fs driver to shut it down and drop the temporary reference we
 *      had just acquired.
 *
 *      Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                unregister_shrinker(&s->s_shrink);
                fs->kill_sb(s);

                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *      deactivate_super        -       drop an active reference to superblock
 *      @s: superblock to deactivate
 *
 *      Variant of deactivate_locked_super(), except that superblock is *not*
 *      locked by caller.  If we are going to drop the final active reference,
 *      lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 *      grab_super - acquire an active reference
 *      @s: reference we are trying to make active
 *
 *      Tries to acquire an active reference.  grab_super() is used when we
 *      had just found a superblock in super_blocks or fs_type->fs_supers
 *      and want to turn it into a full-blown active reference.  grab_super()
 *      is called with sb_lock held and drops it.  Returns 1 in case of
 *      success, 0 if we failed (the superblock was already dead or dying
 *      when grab_super() was called).  Note that this is only called for
 *      superblocks not in rundown mode (== ones still on ->fs_supers
 *      of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        s->s_count++;
        spin_unlock(&sb_lock);
        down_write(&s->s_umount);
        if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}

/*
 *      grab_super_passive - acquire a passive reference
 *      @sb: reference we are trying to grab
 *
 *      Tries to acquire a passive reference. This is used in places where we
 *      cannot take an active reference but we need to ensure that the
 *      superblock does not go away while we are working on it. It returns
 *      false if a reference was not gained, and returns true with the s_umount
 *      lock held in read mode if a reference is gained. On successful return,
 *      the caller must drop the s_umount lock and the passive reference when
 *      done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (hlist_unhashed(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}

/**
 *      generic_shutdown_super  -       common helper for ->kill_sb()
 *      @sb: superblock to kill
 *
 *      generic_shutdown_super() does all fs-independent work on superblock
 *      shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *      that need destruction out of superblock, call generic_shutdown_super()
 *      and release aforementioned objects.  Note: dentries and inodes _are_
 *      taken care of and do not need specific handling.
 *
 *      Upon calling this function, the filesystem may no longer alter or
 *      rearrange the set of dentries belonging to this super_block, nor may it
 *      change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                           "Self-destruct in 5 seconds.  Have a nice day...\n",
                           sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

/**
 *      sget    -       find or create a superblock
 *      @type:  filesystem type superblock should belong to
 *      @test:  comparison callback
 *      @set:   setup callback
 *      @flags: mount flags
 *      @data:  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        int flags,
                        void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        register_shrinker(&s->s_shrink);
        return s;
}

EXPORT_SYMBOL(sget);
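
/*
 * Illustrative sketch of a typical sget() caller (hypothetical myfs_*
 * names, not part of this file): the @test callback matches an existing
 * superblock by its private data and the @set callback initializes a
 * freshly allocated one, mirroring ns_test_super()/ns_set_super() below.
 *
 *      static int myfs_test_super(struct super_block *sb, void *data)
 *      {
 *              return sb->s_fs_info == data;
 *      }
 *
 *      static int myfs_set_super(struct super_block *sb, void *data)
 *      {
 *              sb->s_fs_info = data;
 *              return set_anon_super(sb, NULL);
 *      }
 *
 *      sb = sget(fs_type, myfs_test_super, myfs_set_super, flags, ctx);
 */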

void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 *      iterate_supers - call function for all active superblocks
 *      @f: function to call
 *      @arg: argument to pass to it
 *
 *      Scans the superblock list and calls given function, passing it
 *      locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

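/*
 * Illustrative sketch of an iterate_supers() caller (hypothetical
 * helper, not part of this file).  The callback only ever sees live,
 * MS_BORN superblocks and runs with s_umount held shared:
 *
 *      static void sync_one_sb(struct super_block *sb, void *arg)
 *      {
 *              if (!(sb->s_flags & MS_RDONLY))
 *                      sync_filesystem(sb);
 *      }
 *
 *      iterate_supers(sync_one_sb, NULL);
 */
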
/**
 *      iterate_supers_type - call function for superblocks of given type
 *      @type: fs type
 *      @f: function to call
 *      @arg: argument to pass to it
 *
 *      Scans the superblock list and calls given function, passing it
 *      locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 *      get_super - get the superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

EXPORT_SYMBOL(get_super);

/**
 *      get_super_thawed - get thawed superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device. The superblock is returned once it is thawed
 *      (or immediately if it was not frozen). %NULL is returned if no match
 *      is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        while (1) {
                struct super_block *s = get_super(bdev);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                up_read(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
EXPORT_SYMBOL(get_super_thawed);

/**
 *      get_active_super - get an active reference to the superblock of a device
 *      @bdev: device to get the superblock for
 *
 *      Scans the superblock list and finds the superblock of the file system
 *      mounted on the device given.  Returns the superblock with an active
 *      reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
                                goto restart;
                        up_write(&sb->s_umount);
                        return sb;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 *      do_remount_sb - asks filesystem to change mount options.
 *      @sb:    superblock in question
 *      @flags: numeric part of options
 *      @data:  the rest of options
 *      @force: whether or not to force the change
 *
 *      Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        if (remount_ro) {
                if (sb->s_pins.first) {
                        up_write(&sb->s_umount);
                        sb_pin_kill(sb);
                        down_write(&sb->s_umount);
                        if (!sb->s_root)
                                return 0;
                        if (sb->s_writers.frozen != SB_UNFROZEN)
                                return -EBUSY;
                        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
                }
        }
        shrink_dcache_sb(sb);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        sb->s_readonly_remount = 1;
                        smp_wmb();
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (e.g. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}
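
/*
 * Illustrative trigger (sketch, not part of this file): do_remount_sb()
 * is reached from the mount(2) path when MS_REMOUNT is set, e.g.
 *
 *      mount("/dev/sda1", "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *
 * which asks this code to flip the superblock read-only after checking
 * that no files are still open for writing.
 */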

static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
                    !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
/* Many userspace utilities consider an FSID of 0 invalid.
 * Always return at least 1 from get_anon_bdev.
 */
static int unnamed_dev_start = 1;

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if (dev == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);
        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
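
/*
 * Illustrative sketch of a block-based filesystem's ->mount() hook built
 * on mount_bdev() (hypothetical myfs_* names, not part of this file):
 *
 *      static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *              int flags, const char *dev_name, void *data)
 *      {
 *              return mount_bdev(fs_type, flags, dev_name, data,
 *                                myfs_fill_super);
 *      }
 *
 * myfs_fill_super() would read the on-disk superblock and set s_root;
 * on failure mount_bdev() tears everything down again via
 * deactivate_locked_super().
 */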

void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_counter_dec(&sb->s_writers.counter[level-1]);
        /*
         * Make sure s_writers are updated before we wake up waiters in
         * freeze_super().
         */
        smp_mb();
        if (waitqueue_active(&sb->s_writers.wait))
                wake_up(&sb->s_writers.wait);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing, but
 * it's a bit tricky to instrument properly.  Getting freeze protection
 * works like getting a read lock, but there are subtle problems.  XFS for
 * example gets freeze protection on internal level twice in some cases,
 * which is OK only because we already hold a freeze protection also on a
 * higher level.  Due to these cases we have to tell lockdep we are doing
 * trylock when we already hold a freeze protection for a higher freeze
 * level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
                                unsigned long ip)
{
        int i;

        if (!trylock) {
                for (i = 0; i < level - 1; i++)
                        if (lock_is_held(&sb->s_writers.lock_map[i])) {
                                trylock = true;
                                break;
                        }
        }
        rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
        if (unlikely(sb->s_writers.frozen >= level)) {
                if (!wait)
                        return 0;
                wait_event(sb->s_writers.wait_unfrozen,
                           sb->s_writers.frozen < level);
        }

#ifdef CONFIG_LOCKDEP
        acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
        percpu_counter_inc(&sb->s_writers.counter[level-1]);
        /*
         * Make sure counter is updated before we check for frozen.
         * freeze_super() first sets frozen and then checks the counter.
         */
        smp_mb();
        if (unlikely(sb->s_writers.frozen >= level)) {
                __sb_end_write(sb, level);
                goto retry;
        }
        return 1;
}
EXPORT_SYMBOL(__sb_start_write);
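
/*
 * Illustrative usage of the write-protection pair (sketch, not from this
 * file): a write path brackets the operation so that freeze_super() can
 * wait for it at the SB_FREEZE_WRITE stage:
 *
 *      sb_start_write(inode->i_sb);
 *      ret = do_the_write(inode, buf, len);    // hypothetical helper
 *      sb_end_write(inode->i_sb);
 *
 * sb_start_pagefault()/sb_end_pagefault() and sb_start_intwrite()/
 * sb_end_intwrite() follow the same pattern for the other two levels.
 */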

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        s64 writers;

        /*
         * We just cycle through lockdep here so that it does not complain
         * about returning to userspace with the lock held.
         */
        rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

        do {
                DEFINE_WAIT(wait);

                /*
                 * We use a barrier in prepare_to_wait() to separate setting
                 * of frozen and checking of the counter
                 */
                prepare_to_wait(&sb->s_writers.wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
                if (writers)
                        schedule();

                finish_wait(&sb->s_writers.wait, &wait);
        } while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & MS_BORN)) {
                up_write(&sb->s_umount);
                return 0;       /* sic - it's "nothing to do" */
        }

        if (sb->s_flags & MS_RDONLY) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        /* From now on, no new normal writers can start */
        sb->s_writers.frozen = SB_FREEZE_WRITE;
        smp_wmb();

        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);

        sb_wait_write(sb, SB_FREEZE_WRITE);

        /* Now we go and block page faults... */
        down_write(&sb->s_umount);
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        smp_wmb();

        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        smp_wmb();
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * This is just for debugging purposes so that fs can warn if it
         * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
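
/*
 * Illustrative pairing (sketch, not part of this file): userspace reaches
 * freeze_super()/thaw_super() through the FIFREEZE/FITHAW ioctls, e.g.
 *
 *      int fd = open("/mnt", O_RDONLY);        // hypothetical mount point
 *      ioctl(fd, FIFREEZE, 0);                 // -> freeze_super()
 *      // ... take a block-level snapshot ...
 *      ioctl(fd, FITHAW, 0);                   // -> thaw_super()
 */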

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_writers.frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS:Filesystem thaw failed\n");
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_writers.frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);