// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock. Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right. For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect. Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
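		/* fall through */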
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match. Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
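	/*
	 * Folding ~XFS_SB_VERSION_OKBITS into the mask means that any
	 * version bits we don't recognize must also match the primary
	 * superblock exactly.
	 */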
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

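		/*
		 * sb_bad_features2 mirrors sb_features2 because old kernels
		 * could write sb_features2 at the wrong offset; the two
		 * copies drifting apart only merits a preen.
		 */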
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
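		/*
		 * agf_btreeblks counts the blocks in these btrees over and
		 * above each tree's single root block, so subtract the root
		 * from each full tree block count.
		 */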
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
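	/*
	 * The AGFL is a circular buffer of xfs_agfl_size(mp) slots, so the
	 * active range can wrap past the end of the array. For example,
	 * with 100 slots, first == 98 and last == 1 covers slots 98, 99, 0,
	 * and 1, i.e. 100 - 98 + 1 + 1 == 4 entries.
	 */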
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	xfs_perag_put(pag);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

struct xchk_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

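	/* Stop the AGFL walk as soon as corruption has been found. */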
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

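/* Order AGFL entries by block number so that duplicates become adjacent. */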
static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us. Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
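	/* Walking the AGFL requires the AGF header for the list bounds. */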
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/*
	 * Check unlinked inode buckets. agi_unlinked is a hash table of
	 * singly linked lists of unlinked-but-still-open inodes; each
	 * bucket must hold NULLAGINO or a valid inode number in this AG.
	 */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	xfs_perag_put(pag);

	xchk_agi_xref(sc);
out:
	return error;
}