fs/xfs/scrub/agheader.c
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_superblock_xref(
        struct xfs_scrub_context        *sc,
        struct xfs_buf                  *bp)
{
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = sc->mp;
        xfs_agnumber_t                  agno = sc->sm->sm_agno;
        xfs_agblock_t                   agbno;
        int                             error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        agbno = XFS_SB_BLOCK(mp);

        error = xfs_scrub_ag_init(sc, agno, &sc->sa);
        if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
                return;

        xfs_scrub_xref_is_used_space(sc, agbno, 1);
        xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
        xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
        xfs_scrub_xref_is_not_shared(sc, agbno, 1);

        /* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
        struct xfs_scrub_context        *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_buf                  *bp;
        struct xfs_dsb                  *sb;
        xfs_agnumber_t                  agno;
        uint32_t                        v2_ok;
        __be32                          features_mask;
        int                             error;
        __be16                          vernum_mask;

        agno = sc->sm->sm_agno;
        if (agno == 0)
                return 0;

        error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
        /*
         * The superblock verifier can return several different error codes
         * if it thinks the superblock doesn't look right.  For a mount these
         * would all get bounced back to userspace, but if we're here then the
         * fs mounted successfully, which means that this secondary superblock
         * is simply incorrect.  Treat all these codes the same way we treat
         * any corruption.
         */
        switch (error) {
        case -EINVAL:   /* also -EWRONGFS */
        case -ENOSYS:
        case -EFBIG:
                error = -EFSCORRUPTED;
        default:
                break;
        }
        if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
                return error;

        sb = XFS_BUF_TO_SBP(bp);

        /*
         * Verify the geometries match.  Fields that are permanently
         * set by mkfs are checked; fields that can be updated later
         * (and are not propagated to backup superblocks) are preen
         * checked.
         */
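        /*
         * "Corrupt" findings below set XFS_SCRUB_OFLAG_CORRUPT (this copy
         * disagrees with the mount on something immutable and needs repair);
         * "preen" findings set XFS_SCRUB_OFLAG_PREEN (the primary super is
         * authoritative, so a stale value here is merely worth rewriting).
         */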
        if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Check sb_versionnum bits that are set at mkfs time. */
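        /*
         * Folding ~XFS_SB_VERSION_OKBITS into the mask also treats any
         * version bit this kernel does not recognize at all as a mismatch.
         */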
        vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
                                  XFS_SB_VERSION_NUMBITS |
                                  XFS_SB_VERSION_ALIGNBIT |
                                  XFS_SB_VERSION_DALIGNBIT |
                                  XFS_SB_VERSION_SHAREDBIT |
                                  XFS_SB_VERSION_LOGV2BIT |
                                  XFS_SB_VERSION_SECTORBIT |
                                  XFS_SB_VERSION_EXTFLGBIT |
                                  XFS_SB_VERSION_DIRV2BIT);
        if ((sb->sb_versionnum & vernum_mask) !=
            (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Check sb_versionnum bits that can be set after mkfs time. */
        vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
                                  XFS_SB_VERSION_NLINKBIT |
                                  XFS_SB_VERSION_QUOTABIT);
        if ((sb->sb_versionnum & vernum_mask) !=
            (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
                xfs_scrub_block_set_preen(sc, bp);

        /*
         * Skip the summary counters since we track them in memory anyway.
         * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
         */
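        /*
         * Secondary superblocks are not updated during normal operation,
         * so stale summary counters here are expected, not corruption.
         */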

        if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
                xfs_scrub_block_set_preen(sc, bp);

        /*
         * Skip the quota flags since repair will force quotacheck.
         * sb_qflags
         */

        if (sb->sb_flags != mp->m_sb.sb_flags)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Do we see any invalid bits in sb_features2? */
        if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
                if (sb->sb_features2 != 0)
                        xfs_scrub_block_set_corrupt(sc, bp);
        } else {
                v2_ok = XFS_SB_VERSION2_OKBITS;
                if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
                        v2_ok |= XFS_SB_VERSION2_CRCBIT;

                if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
                        xfs_scrub_block_set_corrupt(sc, bp);

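                /*
                 * sb_bad_features2 mirrors sb_features2 to work around an
                 * old mkfs alignment bug that wrote features2 at the wrong
                 * offset; the kernel keeps both copies in sync, so a
                 * mismatch is only worth a preen.
                 */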
                if (sb->sb_features2 != sb->sb_bad_features2)
                        xfs_scrub_block_set_preen(sc, bp);
        }

        /* Check sb_features2 flags that are set at mkfs time. */
        features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
                                    XFS_SB_VERSION2_PROJID32BIT |
                                    XFS_SB_VERSION2_CRCBIT |
                                    XFS_SB_VERSION2_FTYPE);
        if ((sb->sb_features2 & features_mask) !=
            (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Check sb_features2 flags that can be set after mkfs time. */
        features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
        if ((sb->sb_features2 & features_mask) !=
            (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (!xfs_sb_version_hascrc(&mp->m_sb)) {
                /* all v5 fields must be zero */
                if (memchr_inv(&sb->sb_features_compat, 0,
                                sizeof(struct xfs_dsb) -
                                offsetof(struct xfs_dsb, sb_features_compat)))
                        xfs_scrub_block_set_corrupt(sc, bp);
        } else {
                /* Check compat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
                if ((sb->sb_features_compat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_compat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Check ro compat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
                                            XFS_SB_FEAT_RO_COMPAT_FINOBT |
                                            XFS_SB_FEAT_RO_COMPAT_RMAPBT |
                                            XFS_SB_FEAT_RO_COMPAT_REFLINK);
                if ((sb->sb_features_ro_compat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Check incompat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
                                            XFS_SB_FEAT_INCOMPAT_FTYPE |
                                            XFS_SB_FEAT_INCOMPAT_SPINODES |
                                            XFS_SB_FEAT_INCOMPAT_META_UUID);
                if ((sb->sb_features_incompat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Check log incompat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
                if ((sb->sb_features_log_incompat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Don't care about sb_crc */

                if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
                        xfs_scrub_block_set_corrupt(sc, bp);

                if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
                        xfs_scrub_block_set_preen(sc, bp);

                /* Don't care about sb_lsn */
        }

        if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
                /* The metadata UUID must be the same for all supers */
                if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
                        xfs_scrub_block_set_corrupt(sc, bp);
        }

        /* Everything else must be zero. */
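        /*
         * sb + 1 points just past struct xfs_dsb, so the check below runs
         * to the end of the sector-sized buffer.
         */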
        if (memchr_inv(sb + 1, 0,
                        BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
                xfs_scrub_block_set_corrupt(sc, bp);

        xfs_scrub_superblock_xref(sc, bp);

        return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xfs_scrub_agf_record_bno_lengths(
        struct xfs_btree_cur            *cur,
        struct xfs_alloc_rec_incore     *rec,
        void                            *priv)
{
        xfs_extlen_t                    *blocks = priv;

        (*blocks) += rec->ar_blockcount;
        return 0;
}

/* Check agf_freeblks */
static inline void
xfs_scrub_agf_xref_freeblks(
        struct xfs_scrub_context        *sc)
{
        struct xfs_agf                  *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        xfs_extlen_t                    blocks = 0;
        int                             error;

        if (!sc->sa.bno_cur)
                return;

        error = xfs_alloc_query_all(sc->sa.bno_cur,
                        xfs_scrub_agf_record_bno_lengths, &blocks);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
                return;
        if (blocks != be32_to_cpu(agf->agf_freeblks))
                xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xfs_scrub_agf_xref_cntbt(
        struct xfs_scrub_context        *sc)
{
        struct xfs_agf                  *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        xfs_agblock_t                   agbno;
        xfs_extlen_t                    blocks;
        int                             have;
        int                             error;

        if (!sc->sa.cnt_cur)
                return;

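        /*
         * The cntbt is sorted by extent length, so an LE lookup of the
         * maximum possible length lands on the largest record, if any.
         */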
        /* Any freespace at all? */
        error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
                return;
        if (!have) {
                if (agf->agf_freeblks != cpu_to_be32(0))
                        xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
                return;
        }

        /* Check agf_longest */
        error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
                return;
        if (!have || blocks != be32_to_cpu(agf->agf_longest))
                xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xfs_scrub_agf_xref_btreeblks(
        struct xfs_scrub_context        *sc)
{
        struct xfs_agf                  *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        struct xfs_mount                *mp = sc->mp;
        xfs_agblock_t                   blocks;
        xfs_agblock_t                   btreeblks;
        int                             error;

        /* Check agf_rmap_blocks; set up for agf_btreeblks check */
        if (sc->sa.rmap_cur) {
                error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
                if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                        return;
                btreeblks = blocks - 1;
                if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
                        xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
        } else {
                btreeblks = 0;
        }
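
        /*
         * agf_btreeblks counts the blocks of the bnobt, cntbt, and rmapbt
         * minus each tree's root block, hence the "- 1" applied per tree.
         */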

        /*
         * No rmap cursor; we can't xref if we have the rmapbt feature.
         * We also can't do it if we're missing the free space btree cursors.
         */
        if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
            !sc->sa.bno_cur || !sc->sa.cnt_cur)
                return;

        /* Check agf_btreeblks */
        error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
                return;
        btreeblks += blocks - 1;

        error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
                return;
        btreeblks += blocks - 1;

        if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
                xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xfs_scrub_agf_xref_refcblks(
        struct xfs_scrub_context        *sc)
{
        struct xfs_agf                  *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        xfs_agblock_t                   blocks;
        int                             error;

        if (!sc->sa.refc_cur)
                return;

        error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
                return;
        if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
                xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
        struct xfs_scrub_context        *sc)
{
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = sc->mp;
        xfs_agblock_t                   agbno;
        int                             error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        agbno = XFS_AGF_BLOCK(mp);

        error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
        if (error)
                return;

        xfs_scrub_xref_is_used_space(sc, agbno, 1);
        xfs_scrub_agf_xref_freeblks(sc);
        xfs_scrub_agf_xref_cntbt(sc);
        xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
        xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
        xfs_scrub_agf_xref_btreeblks(sc);
        xfs_scrub_xref_is_not_shared(sc, agbno, 1);
        xfs_scrub_agf_xref_refcblks(sc);

        /* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xfs_scrub_agf(
        struct xfs_scrub_context        *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_agf                  *agf;
        xfs_agnumber_t                  agno;
        xfs_agblock_t                   agbno;
        xfs_agblock_t                   eoag;
        xfs_agblock_t                   agfl_first;
        xfs_agblock_t                   agfl_last;
        xfs_agblock_t                   agfl_count;
        xfs_agblock_t                   fl_count;
        int                             level;
        int                             error = 0;

        agno = sc->sa.agno = sc->sm->sm_agno;
        error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
                        &sc->sa.agf_bp, &sc->sa.agfl_bp);
        if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
                goto out;
        xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);

        agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

        /* Check the AG length */
        eoag = be32_to_cpu(agf->agf_length);
        if (eoag != xfs_ag_block_count(mp, agno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        /* Check the AGF btree roots and levels */
        agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
        if (!xfs_verify_agbno(mp, agno, agbno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
        if (!xfs_verify_agbno(mp, agno, agbno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
        if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
        if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
                if (!xfs_verify_agbno(mp, agno, agbno))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

                level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
                if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
        }

        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                agbno = be32_to_cpu(agf->agf_refcount_root);
                if (!xfs_verify_agbno(mp, agno, agbno))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

                level = be32_to_cpu(agf->agf_refcount_level);
                if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
        }

        /* Check the AGFL counters */
        agfl_first = be32_to_cpu(agf->agf_flfirst);
        agfl_last = be32_to_cpu(agf->agf_fllast);
        agfl_count = be32_to_cpu(agf->agf_flcount);
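        /*
         * The AGFL is a circular buffer: if the list has wrapped past the
         * end, the live region runs from agfl_first to the end of the list
         * and then from the start through agfl_last.
         */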
        if (agfl_last > agfl_first)
                fl_count = agfl_last - agfl_first + 1;
        else
                fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
        if (agfl_count != 0 && fl_count != agfl_count)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        xfs_scrub_agf_xref(sc);
out:
        return error;
}

/* AGFL */

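/*
 * Walk context for the AGFL scan: sz_entries is the expected list length
 * taken from agf_flcount, nr_entries counts the blocks actually visited,
 * and entries collects them for the sort-and-dedupe pass at the end.
 */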
struct xfs_scrub_agfl_info {
        struct xfs_owner_info           oinfo;
        unsigned int                    sz_entries;
        unsigned int                    nr_entries;
        xfs_agblock_t                   *entries;
        struct xfs_scrub_context        *sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_block_xref(
        struct xfs_scrub_context        *sc,
        xfs_agblock_t                   agbno,
        struct xfs_owner_info           *oinfo)
{
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        xfs_scrub_xref_is_used_space(sc, agbno, 1);
        xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
        xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
        xfs_scrub_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
        struct xfs_mount                *mp,
        xfs_agblock_t                   agbno,
        void                            *priv)
{
        struct xfs_scrub_agfl_info      *sai = priv;
        struct xfs_scrub_context        *sc = sai->sc;
        xfs_agnumber_t                  agno = sc->sa.agno;

        if (xfs_verify_agbno(mp, agno, agbno) &&
            sai->nr_entries < sai->sz_entries)
                sai->entries[sai->nr_entries++] = agbno;
        else
                xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

        xfs_scrub_agfl_block_xref(sc, agbno, &sai->oinfo);

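        /*
         * Once corruption has been flagged there's no point in walking the
         * rest of the list; the abort code is translated back to zero by
         * the caller.
         */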
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return XFS_BTREE_QUERY_RANGE_ABORT;

        return 0;
}

static int
xfs_scrub_agblock_cmp(
        const void              *pa,
        const void              *pb)
{
        const xfs_agblock_t     *a = pa;
        const xfs_agblock_t     *b = pb;

        return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_xref(
        struct xfs_scrub_context        *sc)
{
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = sc->mp;
        xfs_agblock_t                   agbno;
        int                             error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        agbno = XFS_AGFL_BLOCK(mp);

        error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
        if (error)
                return;

        xfs_scrub_xref_is_used_space(sc, agbno, 1);
        xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
        xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
        xfs_scrub_xref_is_not_shared(sc, agbno, 1);

        /*
         * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
         * active so that the agfl block xref can use it too.
         */
}

/* Scrub the AGFL. */
int
xfs_scrub_agfl(
        struct xfs_scrub_context        *sc)
{
        struct xfs_scrub_agfl_info      sai;
        struct xfs_agf                  *agf;
        xfs_agnumber_t                  agno;
        unsigned int                    agflcount;
        unsigned int                    i;
        int                             error;

        agno = sc->sa.agno = sc->sm->sm_agno;
        error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
                        &sc->sa.agf_bp, &sc->sa.agfl_bp);
        if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
                goto out;
        if (!sc->sa.agf_bp)
                return -EFSCORRUPTED;
        xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);

        xfs_scrub_agfl_xref(sc);

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Allocate buffer to ensure uniqueness of AGFL entries. */
        agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        agflcount = be32_to_cpu(agf->agf_flcount);
        if (agflcount > xfs_agfl_size(sc->mp)) {
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                goto out;
        }
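
        /*
         * KM_MAYFAIL lets the allocation below fail under memory pressure
         * rather than retry indefinitely; scrub then bails out with -ENOMEM.
         */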
        memset(&sai, 0, sizeof(sai));
        sai.sc = sc;
        sai.sz_entries = agflcount;
        sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
                        KM_MAYFAIL);
        if (!sai.entries) {
                error = -ENOMEM;
                goto out;
        }

        /* Check the blocks in the AGFL. */
        xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
        error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
                        sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai);
        if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
                error = 0;
                goto out_free;
        }
        if (error)
                goto out_free;

        if (agflcount != sai.nr_entries) {
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                goto out_free;
        }

        /* Sort entries, check for duplicates. */
        sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
             xfs_scrub_agblock_cmp, NULL);
        for (i = 1; i < sai.nr_entries; i++) {
                if (sai.entries[i] == sai.entries[i - 1]) {
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                        break;
                }
        }

out_free:
        kmem_free(sai.entries);
out:
        return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xfs_scrub_agi_xref_icounts(
        struct xfs_scrub_context        *sc)
{
        struct xfs_agi                  *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
        xfs_agino_t                     icount;
        xfs_agino_t                     freecount;
        int                             error;

        if (!sc->sa.ino_cur)
                return;

        error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
        if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
                return;
        if (be32_to_cpu(agi->agi_count) != icount ||
            be32_to_cpu(agi->agi_freecount) != freecount)
                xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agi_xref(
        struct xfs_scrub_context        *sc)
{
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = sc->mp;
        xfs_agblock_t                   agbno;
        int                             error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        agbno = XFS_AGI_BLOCK(mp);

        error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
        if (error)
                return;

        xfs_scrub_xref_is_used_space(sc, agbno, 1);
        xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
        xfs_scrub_agi_xref_icounts(sc);
        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
        xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
        xfs_scrub_xref_is_not_shared(sc, agbno, 1);

        /* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xfs_scrub_agi(
        struct xfs_scrub_context        *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_agi                  *agi;
        xfs_agnumber_t                  agno;
        xfs_agblock_t                   agbno;
        xfs_agblock_t                   eoag;
        xfs_agino_t                     agino;
        xfs_agino_t                     first_agino;
        xfs_agino_t                     last_agino;
        xfs_agino_t                     icount;
        int                             i;
        int                             level;
        int                             error = 0;

        agno = sc->sa.agno = sc->sm->sm_agno;
        error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
                        &sc->sa.agf_bp, &sc->sa.agfl_bp);
        if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
                goto out;
        xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);

        agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

        /* Check the AG length */
        eoag = be32_to_cpu(agi->agi_length);
        if (eoag != xfs_ag_block_count(mp, agno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        /* Check btree roots and levels */
        agbno = be32_to_cpu(agi->agi_root);
        if (!xfs_verify_agbno(mp, agno, agbno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        level = be32_to_cpu(agi->agi_level);
        if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                agbno = be32_to_cpu(agi->agi_free_root);
                if (!xfs_verify_agbno(mp, agno, agbno))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

                level = be32_to_cpu(agi->agi_free_level);
                if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
        }

        /* Check inode counters */
        xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
        icount = be32_to_cpu(agi->agi_count);
        if (icount > last_agino - first_agino + 1 ||
            icount < be32_to_cpu(agi->agi_freecount))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        /* Check inode pointers */
        agino = be32_to_cpu(agi->agi_newino);
        if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        agino = be32_to_cpu(agi->agi_dirino);
        if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        /* Check unlinked inode buckets */
        for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
                agino = be32_to_cpu(agi->agi_unlinked[i]);
                if (agino == NULLAGINO)
                        continue;
                if (!xfs_verify_agino(mp, agno, agino))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
        }

        if (agi->agi_pad32 != cpu_to_be32(0))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        xfs_scrub_agi_xref(sc);
out:
        return error;
}