// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
void
xfs_uuid_table_free(void)
{
        if (xfs_uuid_table_size == 0)
                return;
        kmem_free(xfs_uuid_table);
        xfs_uuid_table = NULL;
        xfs_uuid_table_size = 0;
}
/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
        struct xfs_mount        *mp)
{
        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
        int                     hole, i;

        /* Publish UUID in struct super_block */
        uuid_copy(&mp->m_super->s_uuid, uuid);

        if (xfs_has_nouuid(mp))
                return 0;

        if (uuid_is_null(uuid)) {
                xfs_warn(mp, "Filesystem has null UUID - can't mount");
                return -EINVAL;
        }

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i])) {
                        hole = i;
                        continue;
                }
                if (uuid_equal(uuid, &xfs_uuid_table[i]))
                        goto out_duplicate;
        }

        if (hole < 0) {
                xfs_uuid_table = krealloc(xfs_uuid_table,
                        (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
                        GFP_KERNEL | __GFP_NOFAIL);
                hole = xfs_uuid_table_size++;
        }
        xfs_uuid_table[hole] = *uuid;
        mutex_unlock(&xfs_uuid_table_mutex);

        return 0;

 out_duplicate:
        mutex_unlock(&xfs_uuid_table_mutex);
        xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
        return -EINVAL;
}
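/*
 * Note on the table usage above: slots freed by xfs_uuid_unmount() are zeroed
 * rather than removed, so the scan reuses the first null slot it finds and
 * the array only grows when every existing slot holds a live UUID.
 */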
STATIC void
xfs_uuid_unmount(
        struct xfs_mount        *mp)
{
        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
        int                     i;

        if (xfs_has_nouuid(mp))
                return;

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i]))
                        continue;
                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
                        continue;
                memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
                break;
        }
        ASSERT(i < xfs_uuid_table_size);
        mutex_unlock(&xfs_uuid_table_mutex);
}
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
        xfs_sb_t        *sbp,
        uint64_t        nblocks)
{
        ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
        ASSERT(sbp->sb_blocklog >= BBSHIFT);

        /* Limited by ULONG_MAX of page cache index */
        if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
                return -EFBIG;
        return 0;
}
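/*
 * A rough illustration of the limit above, assuming a 32-bit kernel with 4k
 * pages and a 4k filesystem block size: PAGE_SHIFT - sb_blocklog is 0, so
 * nblocks itself must fit in ULONG_MAX (~2^32), i.e. roughly 16TiB of blocks
 * that can be indexed through the page cache.
 */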
/*
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
        struct xfs_mount *mp,
        int             flags)
{
        unsigned int    sector_size;
        struct xfs_buf  *bp;
        struct xfs_sb   *sbp = &mp->m_sb;
        int             error;
        int             loud = !(flags & XFS_MFSI_QUIET);
        const struct xfs_buf_ops *buf_ops;

        ASSERT(mp->m_sb_bp == NULL);
        ASSERT(mp->m_ddev_targp != NULL);

        /*
         * For the initial read, we must guess at the sector
         * size based on the block device.  It's enough to
         * get the sb_sectsize out of the superblock and
         * then reread with the proper length.
         * We don't verify it yet, because it may not be complete.
         */
        sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
        buf_ops = NULL;

        /*
         * Allocate a (locked) buffer to hold the superblock. This will be kept
         * around at all times to optimize access to the superblock. Therefore,
         * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
         * elevated.
         */
reread:
        error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
                                      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
                                      buf_ops);
        if (error) {
                if (loud)
                        xfs_warn(mp, "SB validate failed with error %d.", error);
                /* bad CRC means corrupted metadata */
                if (error == -EFSBADCRC)
                        error = -EFSCORRUPTED;
                return error;
        }

        /*
         * Initialize the mount structure from the superblock.
         */
        xfs_sb_from_disk(sbp, bp->b_addr);

        /*
         * If we haven't validated the superblock, do so now before we try
         * to check the sector size and reread the superblock appropriately.
         */
        if (sbp->sb_magicnum != XFS_SB_MAGIC) {
                if (loud)
                        xfs_warn(mp, "Invalid superblock magic number");
                error = -EINVAL;
                goto release_buf;
        }

        /*
         * We must be able to do sector-sized and sector-aligned IO.
         */
        if (sector_size > sbp->sb_sectsize) {
                if (loud)
                        xfs_warn(mp, "device supports %u byte sectors (not %u)",
                                sector_size, sbp->sb_sectsize);
                error = -ENOSYS;
                goto release_buf;
        }

        if (buf_ops == NULL) {
                /*
                 * Re-read the superblock so the buffer is correctly sized,
                 * and properly verified.
                 */
                xfs_buf_relse(bp);
                sector_size = sbp->sb_sectsize;
                buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
                goto reread;
        }

        mp->m_features |= xfs_sb_version_to_features(sbp);
        xfs_reinit_percpu_counters(mp);

        /* no need to be quiet anymore, so reset the buf ops */
        bp->b_ops = &xfs_sb_buf_ops;

        mp->m_sb_bp = bp;
        xfs_buf_unlock(bp);
        return 0;

release_buf:
        xfs_buf_relse(bp);
        return error;
}
/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before.  Returns true to update the sb, false otherwise.
 */
static inline int
xfs_check_new_dalign(
        struct xfs_mount        *mp,
        int                     new_dalign,
        bool                    *update_sb)
{
        struct xfs_sb           *sbp = &mp->m_sb;
        xfs_ino_t               calc_ino;

        calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
        trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

        if (sbp->sb_rootino == calc_ino) {
                *update_sb = true;
                return 0;
        }

        xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

        /*
         * XXX: Next time we add a new incompat feature, this should start
         * returning -EINVAL to fail the mount.  Until then, spit out a warning
         * that we're ignoring the administrator's instructions.
         */
        xfs_warn(mp, "Skipping superblock stripe alignment update.");
        *update_sb = false;
        return 0;
}
/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects.  This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
        struct xfs_mount        *mp)
{
        if (mp->m_dalign == 0)
                return 0;

        /*
         * If stripe unit and stripe width are not multiples
         * of the fs blocksize turn off alignment.
         */
        if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
            (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
                xfs_warn(mp,
        "alignment check failed: sunit/swidth vs. blocksize(%d)",
                        mp->m_sb.sb_blocksize);
                return -EINVAL;
        }

        /*
         * Convert the stripe unit and width to FSBs.
         */
        mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
        if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
                xfs_warn(mp,
        "alignment check failed: sunit/swidth vs. agsize(%d)",
                        mp->m_sb.sb_agblocks);
                return -EINVAL;
        } else if (mp->m_dalign) {
                mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
        } else {
                xfs_warn(mp,
        "alignment check failed: sunit(%d) less than bsize(%d)",
                        mp->m_dalign, mp->m_sb.sb_blocksize);
                return -EINVAL;
        }

        if (!xfs_has_dalign(mp)) {
                xfs_warn(mp,
        "cannot change alignment: superblock does not support data alignment");
                return -EINVAL;
        }

        return 0;
}
/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
        struct xfs_mount        *mp)
{
        struct xfs_sb           *sbp = &mp->m_sb;

        if (mp->m_dalign) {
                bool            update_sb;
                int             error;

                if (sbp->sb_unit == mp->m_dalign &&
                    sbp->sb_width == mp->m_swidth)
                        return 0;

                error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
                if (error || !update_sb)
                        return error;

                sbp->sb_unit = mp->m_dalign;
                sbp->sb_width = mp->m_swidth;
                mp->m_update_sb = true;
        } else if (!xfs_has_noalign(mp) && xfs_has_dalign(mp)) {
                mp->m_dalign = sbp->sb_unit;
                mp->m_swidth = sbp->sb_width;
        }

        return 0;
}
/*
 * Precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
        struct xfs_mount        *mp)
{
        uint64_t                dblocks = mp->m_sb.sb_dblocks;
        uint64_t                rtexts = mp->m_sb.sb_rextents;
        int                     i;

        do_div(dblocks, 100);
        do_div(rtexts, 100);

        for (i = 0; i < XFS_LOWSP_MAX; i++) {
                mp->m_low_space[i] = dblocks * (i + 1);
                mp->m_low_rtexts[i] = rtexts * (i + 1);
        }
}
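/*
 * A quick illustration of the tables above: for a filesystem with 1,000,000
 * data blocks, m_low_space[] holds {10000, 20000, 30000, ...}, i.e. slot i
 * corresponds to (i + 1) percent of sb_dblocks, and the rtextent table is
 * built the same way from sb_rextents.
 */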
/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
        struct xfs_mount *mp)
{
        struct xfs_buf  *bp;
        xfs_daddr_t     d;
        int             error;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
                xfs_warn(mp, "filesystem size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_ddev_targp,
                                        d - XFS_FSS_TO_BB(mp, 1),
                                        XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "last sector read failed");
                return error;
        }
        xfs_buf_relse(bp);

        if (mp->m_logdev_targp == mp->m_ddev_targp)
                return 0;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
                xfs_warn(mp, "log size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_logdev_targp,
                                        d - XFS_FSB_TO_BB(mp, 1),
                                        XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "log device read failed");
                return error;
        }
        xfs_buf_relse(bp);
        return 0;
}
/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
        struct xfs_mount        *mp)
{
        mp->m_qflags = 0;

        /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
        if (mp->m_sb.sb_qflags == 0)
                return 0;
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = 0;
        spin_unlock(&mp->m_sb_lock);

        if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
                return 0;

        return xfs_sync_sb(mp, false);
}
uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
        uint64_t resblks;

        /*
         * We default to 5% or 8192 fsbs of space reserved, whichever is
         * smaller.  This is intended to cover concurrent allocation
         * transactions when we initially hit enospc. These each require a 4
         * block reservation. Hence by default we cover roughly 2000 concurrent
         * allocation reservations.
         */
        resblks = mp->m_sb.sb_dblocks;
        do_div(resblks, 20);
        resblks = min_t(uint64_t, resblks, 8192);
        return resblks;
}
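/*
 * Rough numbers for the sizing above: a filesystem with 26,214,400 4k data
 * blocks (100GiB) would get 5% = ~1.3M blocks, so the 8192-block cap wins;
 * the 5% figure only applies to filesystems smaller than 20 * 8192 = 163,840
 * blocks (about 640MiB at a 4k block size).
 */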
/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
        struct xfs_mount        *mp)
{
        /*
         * The AG0 superblock verifier rejects in-progress filesystems,
         * so we should never see the flag set this far into mounting.
         */
        if (mp->m_sb.sb_inprogress) {
                xfs_err(mp, "sb_inprogress set after log recovery??");
                return -EFSCORRUPTED;
        }

        /*
         * Now the log is mounted, we know if it was an unclean shutdown or
         * not. If it was, the first phase of recovery has completed and we
         * have consistent AG blocks on disk. We have not recovered EFIs yet,
         * but they are recovered transactionally in the second recovery phase
         * later.
         *
         * If the log was clean when we mounted, we can check the summary
         * counters.  If any of them are obviously incorrect, we can recompute
         * them from the AGF headers in the next step.
         */
        if (xfs_is_clean(mp) &&
            (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
             !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
             mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
                xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

        /*
         * We can safely re-initialise incore superblock counters from the
         * per-ag data. These may not be correct if the filesystem was not
         * cleanly unmounted, so we waited for recovery to finish before doing
         * this.
         *
         * If the filesystem was cleanly unmounted or the previous check did
         * not flag anything weird, then we can trust the values in the
         * superblock to be correct and we don't need to do anything here.
         * Otherwise, recalculate the summary counters.
         */
        if ((!xfs_has_lazysbcount(mp) || xfs_is_clean(mp)) &&
            !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
                return 0;

        return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}
/*
 * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
 * internal inode structures can be sitting in the CIL and AIL at this point,
 * so we need to unpin them, write them back and/or reclaim them before unmount
 * can proceed.  In other words, callers are required to have inactivated all
 * inodes.
 *
 * An inode cluster that has been freed can have its buffer still pinned in
 * memory because the transaction is still sitting in an iclog. The stale
 * inodes on that buffer will be pinned to the buffer until the transaction
 * hits the disk and the callbacks run. Pushing the AIL will skip the stale
 * inodes and may never see the pinned buffer, so nothing will push out the
 * iclog and unpin the buffer.
 *
 * Hence we need to force the log to unpin everything first. However, log
 * forces don't wait for the discards they issue to complete, so we have to
 * explicitly wait for them to complete here as well.
 *
 * Then we can tell the world we are unmounting so that error handling knows
 * that the filesystem is going away and we should error out anything that we
 * have been retrying in the background.  This will prevent never-ending
 * retries in AIL pushing from hanging the unmount.
 *
 * Finally, we can push the AIL to clean all the remaining dirty objects, then
 * reclaim the remaining inodes that are still in memory at this point in time.
 */
static void
xfs_unmount_flush_inodes(
        struct xfs_mount        *mp)
{
        xfs_log_force(mp, XFS_LOG_SYNC);
        xfs_extent_busy_wait_all(mp);
        flush_workqueue(xfs_discard_wq);

        set_bit(XFS_OPSTATE_UNMOUNTING, &mp->m_opstate);

        xfs_ail_push_all_sync(mp->m_ail);
        xfs_inodegc_stop(mp);
        cancel_delayed_work_sync(&mp->m_reclaim_work);
        xfs_reclaim_inodes(mp);
        xfs_health_unmount(mp);
}
static void
xfs_mount_setup_inode_geom(
        struct xfs_mount        *mp)
{
        struct xfs_ino_geometry *igeo = M_IGEO(mp);

        igeo->attr_fork_offset = xfs_bmap_compute_attr_offset(mp);
        ASSERT(igeo->attr_fork_offset < XFS_LITINO(mp));

        xfs_ialloc_setup_geometry(mp);
}
/*
 * This function does the following on an initial mount of a file system:
 *      - reads the superblock from disk and init the mount struct
 *      - if we're a 32-bit kernel, do a size check on the superblock
 *              so we don't mount terabyte filesystems
 *      - init mount struct realtime fields
 *      - allocate inode hash table for fs
 *      - init directory manager
 *      - perform recovery and init the log manager
 */
int
xfs_mountfs(
        struct xfs_mount        *mp)
{
        struct xfs_sb           *sbp = &(mp->m_sb);
        struct xfs_inode        *rip;
        struct xfs_ino_geometry *igeo = M_IGEO(mp);
        uint64_t                resblks;
        uint                    quotamount = 0;
        uint                    quotaflags = 0;
        int                     error = 0;
        xfs_sb_mount_common(mp, sbp);

        /*
         * Check for a mismatched features2 values. Older kernels read & wrote
         * into the wrong sb offset for sb_features2 on some platforms due to
         * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
         * which made older superblock reading/writing routines swap it as a
         * 64-bit value.
         *
         * For backwards compatibility, we make both slots equal.
         *
         * If we detect a mismatched field, we OR the set bits into the existing
         * features2 field in case it has already been modified; we don't want
         * to lose any features. We then update the bad location with the ORed
         * value so that older kernels will see any features2 flags. The
         * superblock writeback code ensures the new sb_features2 is copied to
         * sb_bad_features2 before it is logged or written to disk.
         */
        if (xfs_sb_has_mismatched_features2(sbp)) {
                xfs_warn(mp, "correcting sb_features alignment problem");
                sbp->sb_features2 |= sbp->sb_bad_features2;
                mp->m_update_sb = true;
        }

        /* always use v2 inodes by default now */
        if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
                mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
                mp->m_features |= XFS_FEAT_NLINK;
                mp->m_update_sb = true;
        }

        /*
         * If we were given new sunit/swidth options, do some basic validation
         * checks and convert the incore dalign and swidth values to the
         * same units (FSB) that everything else uses. This /must/ happen
         * before computing the inode geometry.
         */
        error = xfs_validate_new_dalign(mp);
        if (error)
                goto out;

        xfs_alloc_compute_maxlevels(mp);
        xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
        xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
        xfs_mount_setup_inode_geom(mp);
        xfs_rmapbt_compute_maxlevels(mp);
        xfs_refcountbt_compute_maxlevels(mp);

        /*
         * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
         * is NOT aligned turn off m_dalign since allocator alignment is within
         * an ag, therefore ag has to be aligned at stripe boundary. Note that
         * we must compute the free space and rmap btree geometry before doing
         * this.
         */
        error = xfs_update_alignment(mp);
        if (error)
                goto out;
        /* enable fail_at_unmount as default */
        mp->m_fail_unmount = true;

        error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
                               NULL, mp->m_super->s_id);
        if (error)
                goto out;

        error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
                               &mp->m_kobj, "stats");
        if (error)
                goto out_remove_sysfs;

        error = xfs_error_sysfs_init(mp);
        if (error)
                goto out_del_stats;

        error = xfs_errortag_init(mp);
        if (error)
                goto out_remove_error_sysfs;

        error = xfs_uuid_mount(mp);
        if (error)
                goto out_remove_errortag;
        /*
         * Update the preferred write size based on the information from the
         * on-disk superblock.
         */
        mp->m_allocsize_log =
                max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
        mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);

        /* set the low space thresholds for dynamic preallocation */
        xfs_set_low_space_thresholds(mp);

        /*
         * If enabled, sparse inode chunk alignment is expected to match the
         * cluster size. Full inode chunk alignment must match the chunk size,
         * but that is checked on sb read verification...
         */
        if (xfs_has_sparseinodes(mp) &&
            mp->m_sb.sb_spino_align !=
                        XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
                xfs_warn(mp,
        "Sparse inode block alignment (%u) must match cluster size (%llu).",
                         mp->m_sb.sb_spino_align,
                         XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
                error = -EINVAL;
                goto out_remove_uuid;
        }

        /*
         * Check that the data (and log if separate) is an ok size.
         */
        error = xfs_check_sizes(mp);
        if (error)
                goto out_remove_uuid;

        /*
         * Initialize realtime fields in the mount structure
         */
        error = xfs_rtmount_init(mp);
        if (error) {
                xfs_warn(mp, "RT mount failed");
                goto out_remove_uuid;
        }
        /*
         * Copies the low order bits of the timestamp and the randomly
         * set "sequence" number out of a UUID.
         */
        mp->m_fixedfsid[0] =
                (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
                 get_unaligned_be16(&sbp->sb_uuid.b[4]);
        mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

        error = xfs_da_mount(mp);
        if (error) {
                xfs_warn(mp, "Failed dir/attr init: %d", error);
                goto out_remove_uuid;
        }

        /*
         * Initialize the precomputed transaction reservations values.
         */
        xfs_trans_init(mp);

        /*
         * Allocate and initialize the per-ag data.
         */
        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
        if (error) {
                xfs_warn(mp, "Failed per-ag init: %d", error);
                goto out_free_dir;
        }

        if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
                xfs_warn(mp, "no log defined");
                error = -EFSCORRUPTED;
                goto out_free_perag;
        }

        error = xfs_inodegc_register_shrinker(mp);
        if (error)
                goto out_fail_wait;

        /*
         * Log's mount-time initialization. The first part of recovery can
         * place some items on the AIL, to be handled when recovery is
         * finished or canceled.
         */
        error = xfs_log_mount(mp, mp->m_logdev_targp,
                              XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
                              XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
        if (error) {
                xfs_warn(mp, "log mount failed");
                goto out_inodegc_shrinker;
        }
        /* Make sure the summary counts are ok. */
        error = xfs_check_summary_counts(mp);
        if (error)
                goto out_log_dealloc;

        /* Enable background inode inactivation workers. */
        xfs_inodegc_start(mp);
        xfs_blockgc_start(mp);

        /*
         * Now that we've recovered any pending superblock feature bit
         * additions, we can finish setting up the attr2 behaviour for the
         * mount. The noattr2 option overrides the superblock flag, so only
         * check the superblock feature flag if the mount option is not set.
         */
        if (xfs_has_noattr2(mp)) {
                mp->m_features &= ~XFS_FEAT_ATTR2;
        } else if (!xfs_has_attr2(mp) &&
                   (mp->m_sb.sb_features2 & XFS_SB_VERSION2_ATTR2BIT)) {
                mp->m_features |= XFS_FEAT_ATTR2;
        }

        /*
         * Get and sanity-check the root inode.
         * Save the pointer to it in the mount structure.
         */
        error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
                         XFS_ILOCK_EXCL, &rip);
        if (error) {
                xfs_warn(mp,
                        "Failed to read root inode 0x%llx, error %d",
                        sbp->sb_rootino, -error);
                goto out_log_dealloc;
        }

        if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
                xfs_warn(mp, "corrupted root inode %llu: not a directory",
                        (unsigned long long)rip->i_ino);
                xfs_iunlock(rip, XFS_ILOCK_EXCL);
                error = -EFSCORRUPTED;
                goto out_rele_rip;
        }
        mp->m_rootip = rip;     /* save it */

        xfs_iunlock(rip, XFS_ILOCK_EXCL);
        /*
         * Initialize realtime inode pointers in the mount structure
         */
        error = xfs_rtmount_inodes(mp);
        if (error) {
                /*
                 * Free up the root inode.
                 */
                xfs_warn(mp, "failed to read RT inodes");
                goto out_rele_rip;
        }

        /*
         * If this is a read-only mount defer the superblock updates until
         * the next remount into writeable mode. Otherwise we would never
         * perform the update e.g. for the root filesystem.
         */
        if (mp->m_update_sb && !xfs_is_readonly(mp)) {
                error = xfs_sync_sb(mp, false);
                if (error) {
                        xfs_warn(mp, "failed to write sb changes");
                        goto out_rtunmount;
                }
        }

        /*
         * Initialise the XFS quota management subsystem for this mount
         */
        if (XFS_IS_QUOTA_ON(mp)) {
                error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
                if (error)
                        goto out_rtunmount;
        } else {
                /*
                 * If a file system had quotas running earlier, but decided to
                 * mount without -o uquota/pquota/gquota options, revoke the
                 * quotachecked license.
                 */
                if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
                        xfs_notice(mp, "resetting quota flags");
                        error = xfs_mount_reset_sbqflags(mp);
                        if (error)
                                goto out_rtunmount;
                }
        }
        /*
         * Finish recovering the file system. This part needed to be delayed
         * until after the root and real-time bitmap inodes were consistently
         * read in. Temporarily create per-AG space reservations for metadata
         * btree shape changes because space freeing transactions (for inode
         * inactivation) require the per-AG reservation in lieu of reserving
         * blocks.
         */
        error = xfs_fs_reserve_ag_blocks(mp);
        if (error && error == -ENOSPC)
                xfs_warn(mp,
        "ENOSPC reserving per-AG metadata pool, log recovery may fail.");
        error = xfs_log_mount_finish(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        if (error) {
                xfs_warn(mp, "log mount finish failed");
                goto out_rtunmount;
        }

        /*
         * Now the log is fully replayed, we can transition to full read-only
         * mode for read-only mounts. This will sync all the metadata and clean
         * the log so that the recovery we just performed does not have to be
         * replayed again on the next mount.
         *
         * We use the same quiesce mechanism as the rw->ro remount, as they are
         * semantically identical operations.
         */
        if (xfs_is_readonly(mp) && !xfs_has_norecovery(mp))
                xfs_log_clean(mp);

        /*
         * Complete the quota initialisation, post-log-replay component.
         */
        if (quotamount) {
                ASSERT(mp->m_qflags == 0);
                mp->m_qflags = quotaflags;

                xfs_qm_mount_quotas(mp);
        }

        /*
         * Now we are mounted, reserve a small amount of unused space for
         * privileged transactions. This is needed so that transaction
         * space required for critical operations can dip into this pool
         * when at ENOSPC. This is needed for operations like create with
         * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
         * are not allowed to use this reserved space.
         *
         * This may drive us straight to ENOSPC on mount, but that implies
         * we were already there on the last unmount. Warn if this occurs.
         */
        if (!xfs_is_readonly(mp)) {
                resblks = xfs_default_resblks(mp);
                error = xfs_reserve_blocks(mp, &resblks, NULL);
                if (error)
                        xfs_warn(mp,
        "Unable to allocate reserve blocks. Continuing without reserve pool.");

                /* Recover any CoW blocks that never got remapped. */
                error = xfs_reflink_recover_cow(mp);
                if (error) {
                        xfs_err(mp,
        "Error %d recovering leftover CoW allocations.", error);
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        goto out_quota;
                }

                /* Reserve AG blocks for future btree expansion. */
                error = xfs_fs_reserve_ag_blocks(mp);
                if (error && error != -ENOSPC)
                        goto out_agresv;
        }

        return 0;
 out_agresv:
        xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
        xfs_qm_unmount_quotas(mp);
 out_rtunmount:
        xfs_rtunmount_inodes(mp);
 out_rele_rip:
        xfs_irele(rip);
        /* Clean out dquots that might be in memory after quotacheck. */
        xfs_qm_unmount(mp);

        /*
         * Inactivate all inodes that might still be in memory after a log
         * intent recovery failure so that reclaim can free them. Metadata
         * inodes and the root directory shouldn't need inactivation, but the
         * mount failed for some reason, so pull down all the state and flee.
         */
        xfs_inodegc_flush(mp);

        /*
         * Flush all inode reclamation work and flush the log.
         * We have to do this /after/ rtunmount and qm_unmount because those
         * two will have scheduled delayed reclaim for the rt/quota inodes.
         *
         * This is slightly different from the unmountfs call sequence
         * because we could be tearing down a partially set up mount. In
         * particular, if log_mount_finish fails we bail out without calling
         * qm_unmount_quotas and therefore rely on qm_unmount to release the
         * quota inodes.
         */
        xfs_unmount_flush_inodes(mp);
 out_log_dealloc:
        xfs_log_mount_cancel(mp);
 out_inodegc_shrinker:
        unregister_shrinker(&mp->m_inodegc_shrinker);
 out_fail_wait:
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
                xfs_buftarg_drain(mp->m_logdev_targp);
        xfs_buftarg_drain(mp->m_ddev_targp);
 out_free_perag:
        xfs_free_perag(mp);
 out_free_dir:
        xfs_da_unmount(mp);
 out_remove_uuid:
        xfs_uuid_unmount(mp);
 out_remove_errortag:
        xfs_errortag_del(mp);
 out_remove_error_sysfs:
        xfs_error_sysfs_del(mp);
 out_del_stats:
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
        xfs_sysfs_del(&mp->m_kobj);
 out:
        return error;
}
/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
        struct xfs_mount        *mp)
{
        uint64_t                resblks;
        int                     error;

        /*
         * Perform all on-disk metadata updates required to inactivate inodes
         * that the VFS evicted earlier in the unmount process. Freeing inodes
         * and discarding CoW fork preallocations can cause shape changes to
         * the free inode and refcount btrees, respectively, so we must finish
         * this before we discard the metadata space reservations. Metadata
         * inodes and the root directory do not require inactivation.
         */
        xfs_inodegc_flush(mp);

        xfs_blockgc_stop(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        xfs_qm_unmount_quotas(mp);
        xfs_rtunmount_inodes(mp);
        xfs_irele(mp->m_rootip);

        xfs_unmount_flush_inodes(mp);

        xfs_qm_unmount(mp);

        /*
         * Unreserve any blocks we have so that when we unmount we don't account
         * the reserved free space as used. This is really only necessary for
         * lazy superblock counting because it trusts the incore superblock
         * counters to be absolutely correct on clean unmount.
         *
         * We don't bother correcting this elsewhere for lazy superblock
         * counting because on mount of an unclean filesystem we reconstruct the
         * correct counter value and this is irrelevant.
         *
         * For non-lazy counter filesystems, this doesn't matter at all because
         * we only ever apply deltas to the superblock and hence the incore
         * value does not matter....
         */
        resblks = 0;
        error = xfs_reserve_blocks(mp, &resblks, NULL);
        if (error)
                xfs_warn(mp, "Unable to free reserved block pool. "
                                "Freespace may not be correct on next mount.");

        xfs_log_unmount(mp);
        xfs_da_unmount(mp);
        xfs_uuid_unmount(mp);

        xfs_errortag_clearall(mp);

        unregister_shrinker(&mp->m_inodegc_shrinker);
        xfs_free_perag(mp);

        xfs_errortag_del(mp);
        xfs_error_sysfs_del(mp);
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
        xfs_sysfs_del(&mp->m_kobj);
}
/*
 * Determine whether modifications can proceed. The caller specifies the
 * minimum freeze level for which modifications should not be allowed. This
 * allows certain operations to proceed while the freeze sequence is in
 * progress, if necessary.
 */
bool
xfs_fs_writable(
        struct xfs_mount        *mp,
        int                     level)
{
        ASSERT(level > SB_UNFROZEN);
        if ((mp->m_super->s_writers.frozen >= level) ||
            xfs_is_shutdown(mp) || xfs_is_readonly(mp))
                return false;

        return true;
}
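/*
 * A sketch of how the level argument behaves (illustrative, not additional
 * code from this file): a caller passing SB_FREEZE_WRITE, such as
 * xfs_mount_reset_sbqflags() above, is refused as soon as writes are frozen,
 * while a caller passing a higher level like SB_FREEZE_FS keeps going until
 * the freeze sequence reaches the filesystem stage, because the check is
 * simply s_writers.frozen >= level.
 */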
int
xfs_mod_fdblocks(
        struct xfs_mount        *mp,
        int64_t                 delta,
        bool                    rsvd)
{
        int64_t                 lcounter;
        long long               res_used;
        s32                     batch;
        uint64_t                set_aside;

        if (delta > 0) {
                /*
                 * If the reserve pool is depleted, put blocks back into it
                 * first. Most of the time the pool is full.
                 */
                if (likely(mp->m_resblks == mp->m_resblks_avail)) {
                        percpu_counter_add(&mp->m_fdblocks, delta);
                        return 0;
                }

                spin_lock(&mp->m_sb_lock);
                res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

                if (res_used > delta) {
                        mp->m_resblks_avail += delta;
                } else {
                        delta -= res_used;
                        mp->m_resblks_avail = mp->m_resblks;
                        percpu_counter_add(&mp->m_fdblocks, delta);
                }
                spin_unlock(&mp->m_sb_lock);
                return 0;
        }

        /*
         * Taking blocks away, need to be more accurate the closer we
         * are to zero.
         *
         * If the counter has a value of less than 2 * max batch size,
         * then make everything serialise as we are real close to
         * ENOSPC.
         */
        if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
                                     XFS_FDBLOCKS_BATCH) < 0)
                batch = 1;
        else
                batch = XFS_FDBLOCKS_BATCH;

        /*
         * Set aside allocbt blocks because these blocks are tracked as free
         * space but not available for allocation. Technically this means that
         * a single reservation cannot consume all remaining free space, but
         * the ratio of allocbt blocks to usable free blocks should be rather
         * small. The tradeoff without this is that filesystems that maintain
         * high perag block reservations can over-reserve physical block
         * availability and fail physical allocation, which leads to much more
         * serious problems (i.e. transaction abort, pagecache discards, etc.)
         * than slightly premature -ENOSPC.
         */
        set_aside = mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
        percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
        if (__percpu_counter_compare(&mp->m_fdblocks, set_aside,
                                     XFS_FDBLOCKS_BATCH) >= 0) {
                /* we had space! */
                return 0;
        }

        /*
         * lock up the sb for dipping into reserves before releasing the space
         * that took us to ENOSPC.
         */
        spin_lock(&mp->m_sb_lock);
        percpu_counter_add(&mp->m_fdblocks, -delta);
        if (!rsvd)
                goto fdblocks_enospc;

        lcounter = (long long)mp->m_resblks_avail + delta;
        if (lcounter >= 0) {
                mp->m_resblks_avail = lcounter;
                spin_unlock(&mp->m_sb_lock);
                return 0;
        }
        xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");

fdblocks_enospc:
        spin_unlock(&mp->m_sb_lock);
        return -ENOSPC;
}
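/*
 * An illustration of the batching behaviour above, assuming for the sake of
 * the example that XFS_FDBLOCKS_BATCH is 1024: while plenty of space remains,
 * each CPU may accumulate up to roughly a batch worth of slop in its local
 * counter before folding it into the global sum; once the global estimate
 * drops below 2 * 1024 free blocks, the batch shrinks to 1 so every
 * modification is applied and compared precisely, trading scalability for an
 * accurate ENOSPC decision.
 */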
int
xfs_mod_frextents(
        struct xfs_mount        *mp,
        int64_t                 delta)
{
        int64_t                 lcounter;
        int                     ret = 0;

        spin_lock(&mp->m_sb_lock);
        lcounter = mp->m_sb.sb_frextents + delta;
        if (lcounter < 0)
                ret = -ENOSPC;
        else
                mp->m_sb.sb_frextents = lcounter;
        spin_unlock(&mp->m_sb_lock);
        return ret;
}
/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
        struct xfs_mount        *mp)
{
        struct xfs_buf          *bp = mp->m_sb_bp;

        xfs_buf_lock(bp);
        mp->m_sb_bp = NULL;
        xfs_buf_relse(bp);
}
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        struct xfs_mount        *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                xfs_notice(mp, "%s required on read-only device.", message);
                xfs_notice(mp, "write access unavailable, cannot proceed.");
                return -EROFS;
        }
        return 0;
}
/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
        struct xfs_mount        *mp)
{
        if (!xfs_has_lazysbcount(mp))
                return;

        xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}
/*
 * Enable a log incompat feature flag in the primary superblock. The caller
 * cannot have any other transactions in progress.
 */
int
xfs_add_incompat_log_feature(
        struct xfs_mount        *mp,
        uint32_t                feature)
{
        struct xfs_dsb          *dsb;
        int                     error;

        ASSERT(hweight32(feature) == 1);
        ASSERT(!(feature & XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));

        /*
         * Force the log to disk and kick the background AIL thread to reduce
         * the chances that the bwrite will stall waiting for the AIL to unpin
         * the primary superblock buffer. This isn't a data integrity
         * operation, so we don't need a synchronous push.
         */
        error = xfs_log_force(mp, XFS_LOG_SYNC);
        if (error)
                return error;
        xfs_ail_push_all(mp->m_ail);

        /*
         * Lock the primary superblock buffer to serialize all callers that
         * are trying to set feature bits.
         */
        xfs_buf_lock(mp->m_sb_bp);
        xfs_buf_hold(mp->m_sb_bp);

        if (xfs_is_shutdown(mp)) {
                error = -EIO;
                goto rele;
        }

        if (xfs_sb_has_incompat_log_feature(&mp->m_sb, feature))
                goto rele;

        /*
         * Write the primary superblock to disk immediately, because we need
         * the log_incompat bit to be set in the primary super now to protect
         * the log items that we're going to commit later.
         */
        dsb = mp->m_sb_bp->b_addr;
        xfs_sb_to_disk(dsb, &mp->m_sb);
        dsb->sb_features_log_incompat |= cpu_to_be32(feature);
        error = xfs_bwrite(mp->m_sb_bp);
        if (error)
                goto shutdown;

        /*
         * Add the feature bits to the incore superblock before we unlock the
         * buffer.
         */
        xfs_sb_add_incompat_log_features(&mp->m_sb, feature);
        xfs_buf_relse(mp->m_sb_bp);

        /* Log the superblock to disk. */
        return xfs_sync_sb(mp, false);
shutdown:
        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
rele:
        xfs_buf_relse(mp->m_sb_bp);
        return error;
}
/*
 * Clear all the log incompat flags from the superblock.
 *
 * The caller cannot be in a transaction, must ensure that the log does not
 * contain any log items protected by any log incompat bit, and must ensure
 * that there are no other threads that depend on the state of the log incompat
 * feature flags in the primary super.
 *
 * Returns true if the superblock is dirty.
 */
bool
xfs_clear_incompat_log_features(
        struct xfs_mount        *mp)
{
        bool                    ret = false;

        if (!xfs_has_crc(mp) ||
            !xfs_sb_has_incompat_log_feature(&mp->m_sb,
                                XFS_SB_FEAT_INCOMPAT_LOG_ALL) ||
            xfs_is_shutdown(mp))
                return false;

        /*
         * Update the incore superblock. We synchronize on the primary super
         * buffer lock to be consistent with the add function, though at least
         * in theory this shouldn't be necessary.
         */
        xfs_buf_lock(mp->m_sb_bp);
        xfs_buf_hold(mp->m_sb_bp);

        if (xfs_sb_has_incompat_log_feature(&mp->m_sb,
                                XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
                xfs_info(mp, "Clearing log incompat feature flags.");
                xfs_sb_remove_incompat_log_features(&mp->m_sb);
                ret = true;
        }

        xfs_buf_relse(mp->m_sb_bp);
        return ret;
}
/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching). Each change to delayed allocation
 * reservations can easily exceed the default percpu counter batching, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH      (4096)
void
xfs_mod_delalloc(
        struct xfs_mount        *mp,
        int64_t                 delta)
{
        percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
                        XFS_DELALLOC_BATCH);
}
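/*
 * As a point of comparison (an illustrative note, not from the original
 * source): the stock percpu_counter batch is much smaller, typically on the
 * order of a few dozen updates, so a single delalloc reservation of a few
 * thousand blocks would spill into the global counter on nearly every call;
 * the 4096-block batch above keeps most of those updates CPU-local.
 */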