// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub *sc,
	struct xfs_buf *bp)
{
	struct xfs_owner_info oinfo;
	struct xfs_mount *mp = sc->mp;
	xfs_agnumber_t agno = sc->sm->sm_agno;
	xfs_agblock_t agbno;
	int error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub *sc)
{
	struct xfs_mount *mp = sc->mp;
	struct xfs_buf *bp;
	struct xfs_dsb *sb;
	xfs_agnumber_t agno;
	uint32_t v2_ok;
	__be32 features_mask;
	int error;
	__be16 vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		break;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
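	/*
	 * Folding ~XFS_SB_VERSION_OKBITS into the mask means that any
	 * feature bit outside the known-OK set must also match the
	 * primary superblock exactly.
	 */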
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);
	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

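		/* Any feature bits outside the OK set mean corruption. */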
		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/*
		 * All v5 fields must be zero; sb_features_compat is the
		 * first v5-only field in struct xfs_dsb, so check from
		 * there to the end of the structure.
		 */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur *cur,
	struct xfs_alloc_rec_incore *rec,
	void *priv)
{
	xfs_extlen_t *blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub *sc)
{
	struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t blocks = 0;
	int error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub *sc)
{
	struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t agbno;
	xfs_extlen_t blocks;
	int have;
	int error;

	if (!sc->sa.cnt_cur)
		return;

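	/*
	 * The cntbt is sorted by extent length, so an LE lookup for the
	 * longest possible extent (-1U) lands on the last record in the
	 * tree, if the tree has any records at all.
	 */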
	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub *sc)
{
	struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount *mp = sc->mp;
	xfs_agblock_t blocks;
	xfs_agblock_t btreeblks;
	int error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
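		/*
		 * agf_btreeblks does not count each btree's root block,
		 * which exists from mkfs onward, so every tree checked
		 * here contributes its block count minus one.
		 */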
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * If the rmapbt feature is enabled but we don't have a rmap cursor,
	 * we can't cross-reference; the same goes for missing free space
	 * btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub *sc)
{
	struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t blocks;
	int error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub *sc)
{
	struct xfs_owner_info oinfo;
	struct xfs_mount *mp = sc->mp;
	xfs_agblock_t agbno;
	int error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub *sc)
{
	struct xfs_mount *mp = sc->mp;
	struct xfs_agf *agf;
	xfs_agnumber_t agno;
	xfs_agblock_t agbno;
	xfs_agblock_t eoag;
	xfs_agblock_t agfl_first;
	xfs_agblock_t agfl_last;
	xfs_agblock_t agfl_count;
	xfs_agblock_t fl_count;
	int level;
	int error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
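	/*
	 * The AGFL is a circular list within a fixed-size block, so the
	 * active entries may wrap past the end.  For example, with a
	 * 100-entry AGFL, flfirst = 98 and fllast = 1 cover entries
	 * 98, 99, 0, and 1, so fl_count = 100 - 98 + 1 + 1 = 4.
	 */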
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

struct xchk_agfl_info {
	struct xfs_owner_info oinfo;	/* rmap owner for AGFL blocks */
	unsigned int sz_entries;	/* size of the entries array */
	unsigned int nr_entries;	/* number of entries collected */
	xfs_agblock_t *entries;		/* AGFL block numbers seen so far */
	struct xfs_scrub *sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub *sc,
	xfs_agblock_t agbno,
	struct xfs_owner_info *oinfo)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount *mp,
	xfs_agblock_t agbno,
	void *priv)
{
	struct xchk_agfl_info *sai = priv;
	struct xfs_scrub *sc = sai->sc;
	xfs_agnumber_t agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno, &sai->oinfo);

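	/* Once corruption has been noted, abort the rest of the walk. */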
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;

	return 0;
}

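/* Compare two AGFL entries by block number so sort() can expose duplicates. */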
static int
xchk_agblock_cmp(
	const void *pa,
	const void *pb)
{
	const xfs_agblock_t *a = pa;
	const xfs_agblock_t *b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub *sc)
{
	struct xfs_owner_info oinfo;
	struct xfs_mount *mp = sc->mp;
	xfs_agblock_t agbno;
	int error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub *sc)
{
	struct xchk_agfl_info sai;
	struct xfs_agf *agf;
	xfs_agnumber_t agno;
	unsigned int agflcount;
	unsigned int i;
	int error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
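	/* Without the AGF we don't know how many entries the AGFL holds. */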
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub *sc)
{
	struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t icount;
	xfs_agino_t freecount;
	int error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub *sc)
{
	struct xfs_owner_info oinfo;
	struct xfs_mount *mp = sc->mp;
	xfs_agblock_t agbno;
	int error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub *sc)
{
	struct xfs_mount *mp = sc->mp;
	struct xfs_agi *agi;
	xfs_agnumber_t agno;
	xfs_agblock_t agbno;
	xfs_agblock_t eoag;
	xfs_agino_t agino;
	xfs_agino_t first_agino;
	xfs_agino_t last_agino;
	xfs_agino_t icount;
	int i;
	int level;
	int error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/*
	 * Check unlinked inode buckets.  Each bucket heads a list of
	 * inodes that were unlinked while still in use; a bucket must
	 * either be empty (NULLAGINO) or point to a valid inode in this AG.
	 */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}