/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * super.c contains code to handle: - mount structures
 *                                  - filesystem drivers list
 *                                  - umount system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 * Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 * Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 * Added options to /proc/mounts:
 *   Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!trylock_super(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches.
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	up_read(&sb->s_umount);
	return freed;
}
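/*
 * Worked example of the proportioning above (illustrative numbers only):
 * with sc->nr_to_scan = 128, dentries = 600, inodes = 300 and
 * fs_objects = 100, total_objects is 1001, so the scan is split roughly
 * as mult_frac(128, 600, 1001) = 76 dentries, 38 inodes and 12
 * filesystem-private objects, i.e. each cache is shrunk in proportion
 * to its share of the total object count.
 */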
static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Don't call trylock_super as it is a potential
	 * scalability bottleneck. The counts could get updated
	 * between super_cache_count and super_cache_scan anyway.
	 * Call to super_cache_count with shrinker_rwsem held
	 * ensures the safety of call to list_lru_shrink_count() and
	 * s_op->nr_cached_objects().
	 */
	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}
static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}
/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static void destroy_super(struct super_block *s)
{
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	WARN_ON(!list_empty(&s->s_mounts));
	put_user_ns(s->s_user_ns);
	call_rcu(&s->rcu, destroy_super_rcu);
}
/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	s->s_bdi = &noop_backing_dev_info;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_anon);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);

	if (list_lru_init_memcg(&s->s_dentry_lru))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru))
		goto fail;

	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	mutex_init(&s->s_dquot.dqio_mutex);
	mutex_init(&s->s_dquot.dqonoff_mutex);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->cleancache_poolid = CLEANCACHE_NO_POOL;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
	return s;

fail:
	destroy_super(s);
	return NULL;
}
/* Superblock refcounting  */

/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}
/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		unregister_shrinker(&s->s_shrink);
		fs->kill_sb(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);
/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
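/*
 * Typical active-reference lifetime using the helpers above (sketch):
 *
 *	s = sget(...);			// takes the initial active reference
 *	...superblock set up and used...
 *	deactivate_locked_super(s);	// or deactivate_super() on unlocked paths
 *
 * When the last active reference is dropped, ->kill_sb() runs and the
 * superblock is torn down.
 */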
/*
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference. grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference. grab_super()
 * is called with sb_lock held and drops it. Returns 1 in case of
 * success, 0 if we had failed (superblock contents were already dead or
 * dying when grab_super() had been called). Note that this is only
 * called for superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}
/*
 * trylock_super - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and the
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool trylock_super(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!hlist_unhashed(&sb->s_instances) &&
		    sb->s_root && (sb->s_flags & MS_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	return false;
}
/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(sb);
		cgroup_writeback_umount();

		evict_inodes(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
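/*
 * A minimal sketch of the ->kill_sb() pattern described above; the
 * foo_* names are hypothetical and not defined in this file:
 *
 *	static void foo_kill_sb(struct super_block *sb)
 *	{
 *		struct foo_fs_info *fsi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(fsi);		// release fs-specific objects last
 *	}
 *
 * Block-based filesystems normally use kill_block_super() and simple
 * in-memory ones kill_anon_super() or kill_litter_super() (both later
 * in this file); each of those ends up calling generic_shutdown_super().
 */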
/**
 * sget_userns - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @user_ns: User namespace for the super_block
 * @data: argument to each of them
 */
struct super_block *sget_userns(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags, struct user_namespace *user_ns,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) &&
	    !(type->fs_flags & FS_USERNS_MOUNT) &&
	    !capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				if (s) {
					up_write(&s->s_umount);
					destroy_super(s);
				}
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~MS_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget_userns);
/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & MS_SUBMOUNT)
		user_ns = &init_user_ns;

	/* Ensure the requestor has permissions over the target filesystem */
	if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	return sget_userns(type, test, set, flags, user_ns, data);
}
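/*
 * Sketch of how a filesystem typically supplies the @test/@set callbacks
 * to sget() (the foo_* names are hypothetical; ns_test_super() and
 * ns_set_super() later in this file are a real in-tree instance of the
 * same pattern):
 *
 *	static int foo_test_super(struct super_block *s, void *data)
 *	{
 *		return s->s_fs_info == data;	// is this the sb we want?
 *	}
 *
 *	static int foo_set_super(struct super_block *s, void *data)
 *	{
 *		s->s_fs_info = data;		// initialise a new sb
 *		return set_anon_super(s, NULL);
 *	}
 *
 *	sb = sget(fs_type, foo_test_super, foo_set_super, flags, key);
 */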
void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);
/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
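/*
 * Illustrative use of iterate_supers() (sketch only; sync_one_sb is a
 * hypothetical callback, not defined here):
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!(sb->s_flags & MS_RDONLY))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 *
 * The callback runs with sb->s_umount held shared and only for
 * superblocks that are fully born and still have a root dentry.
 */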
/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);
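/*
 * Callers pair get_super() with drop_super() above (sketch):
 *
 *	struct super_block *sb = get_super(bdev);
 *
 *	if (sb) {
 *		...inspect the superblock; s_umount is held shared...
 *		drop_super(sb);
 *	}
 *
 * drop_super() releases s_umount and the temporary ->s_count reference
 * taken here.
 */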
/**
 * get_super_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen). %NULL is returned if no match
 * is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	while (1) {
		struct super_block *s = get_super(bdev);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		up_read(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}
EXPORT_SYMBOL(get_super_thawed);
/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			up_write(&sb->s_umount);
			group_pin_kill(&sb->s_pins);
			down_write(&sb->s_umount);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}
static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
		    !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
/* Many userspace utilities consider an FSID of 0 invalid.
 * Always return at least 1 from get_anon_bdev.
 */
static int unnamed_dev_start = 1;

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if (dev >= (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);
void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}
struct dentry *mount_ns(struct file_system_type *fs_type,
	int flags, void *data, void *ns, struct user_namespace *user_ns,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	/* Don't allow mounting unless the caller has CAP_SYS_ADMIN
	 * over the namespace.
	 */
	if (!(flags & MS_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	sb = sget_userns(fs_type, ns_test_super, ns_set_super, flags,
			 user_ns, ns);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (current_user_ns() != &init_user_ns) {
		/*
		 * For userns mounts, disallow mounting if bdev is open for
		 * writing.
		 */
		if (!atomic_dec_unless_positive(&bdev->bd_inode->i_writecount)) {
			error = -EBUSY;
			goto error_bdev;
		}
		if (bdev->bd_contains != bdev &&
		    !atomic_dec_unless_positive(&bdev->bd_contains->bd_inode->i_writecount)) {
			atomic_inc(&bdev->bd_inode->i_writecount);
			error = -EBUSY;
			goto error_bdev;
		}
	}

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_unwind;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_unwind;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_unwind:
	if (current_user_ns() != &init_user_ns) {
		atomic_inc(&bdev->bd_inode->i_writecount);
		if (bdev->bd_contains != bdev)
			atomic_inc(&bdev->bd_contains->bd_inode->i_writecount);
	}
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
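/*
 * Typical use of mount_bdev() from a block-based filesystem's ->mount
 * method (a minimal sketch; the foo_* names are hypothetical and not
 * part of this file):
 *
 *	static struct dentry *foo_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  foo_fill_super);
 *	}
 *
 * where foo_fill_super() reads the on-disk superblock, sets s_op and
 * allocates the root dentry, returning 0 on success.
 */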
void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	if (sb->s_user_ns != &init_user_ns) {
		atomic_inc(&bdev->bd_inode->i_writecount);
		if (bdev->bd_contains != bdev)
			atomic_inc(&bdev->bd_contains->bd_inode->i_writecount);
	}
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
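/*
 * Sketch of how virtual filesystems typically use the helpers above in
 * their ->mount methods (foo_* names are hypothetical):
 *
 *	// a fresh instance per mount (tmpfs-like):
 *	static struct dentry *foo_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, foo_fill_super);
 *	}
 *
 *	// one shared instance for all mounts (debugfs-like):
 *		return mount_single(fs_type, flags, data, foo_fill_super);
 */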
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	WARN_ON(!sb->s_bdi);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}
/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_up_read(sb->s_writers.rw_sem + level-1);
}
EXPORT_SYMBOL(__sb_end_write);

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
	bool force_trylock = false;
	int ret = 1;

#ifdef CONFIG_LOCKDEP
	/*
	 * We want lockdep to tell us about possible deadlocks with freezing
	 * but it's a bit tricky to properly instrument it. Getting a freeze
	 * protection works as getting a read lock but there are subtle
	 * problems. XFS for example gets freeze protection on internal level
	 * twice in some cases, which is OK only because we already hold a
	 * freeze protection also on higher level. Due to these cases we have
	 * to use wait == false (trylock mode) which must not fail.
	 */
	if (wait) {
		int i;

		for (i = 0; i < level - 1; i++)
			if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
				force_trylock = true;
				break;
			}
	}
#endif
	if (wait && !force_trylock)
		percpu_down_read(sb->s_writers.rw_sem + level-1);
	else
		ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);

	WARN_ON(force_trylock && !ret);
	return ret;
}
EXPORT_SYMBOL(__sb_start_write);
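/*
 * Callers normally go through the level-specific wrappers from
 * include/linux/fs.h rather than calling the functions above directly;
 * the usual pairing looks like (sketch):
 *
 *	sb_start_write(sb);	// blocks while the fs is frozen for writes
 *	...modify the filesystem...
 *	sb_end_write(sb);
 *
 * sb_start_pagefault()/sb_end_pagefault() and sb_start_intwrite()/
 * sb_end_intwrite() are the corresponding pairs for the other two
 * freeze levels.
 */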
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);

	/*
	 * We are going to return to userspace and forget about this lock, the
	 * ownership goes to the caller of thaw_super() which does unlock.
	 *
	 * FIXME: we should do this before return from freeze_super() after we
	 * called sync_filesystem(sb) and s_op->freeze_fs(sb), and thaw_super()
	 * should re-acquire these locks before s_op->unfreeze_fs(sb). However
	 * this leads to lockdep false-positives, so currently we do the early
	 * release right after acquire.
	 */
	percpu_rwsem_release(sb->s_writers.rw_sem + level-1, 0, _THIS_IP_);
}
static void sb_freeze_unlock(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * the fs is frozen).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & MS_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb->s_flags & MS_RDONLY) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	down_write(&sb->s_umount);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb);
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	up_write(&sb->s_umount);
	return 0;
}

EXPORT_SYMBOL(freeze_super);
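/*
 * Sketch of the caller-side pairing for freeze_super()/thaw_super()
 * (illustrative only): a management path freezes the filesystem, takes
 * a block-level snapshot or backup, then thaws it:
 *
 *	error = freeze_super(sb);
 *	if (!error) {
 *		...snapshot/backup the underlying device...
 *		thaw_super(sb);
 *	}
 *
 * Each successful freeze must be balanced by exactly one thaw_super();
 * freezing an already-frozen superblock returns -EBUSY, and thawing an
 * unfrozen one fails (see thaw_super() below).
 */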
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY) {
		sb->s_writers.frozen = SB_UNFROZEN;
		goto out;
	}

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			up_write(&sb->s_umount);
			return error;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb_freeze_unlock(sb);
out:
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);
	return 0;
}
EXPORT_SYMBOL(thaw_super);