// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
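/*
 * For example (numbers purely illustrative), suppose the refcountbt
 * has a record (bno 10, len 10, refcount 3) and the rmapbt contains
 * one rmap covering (0, 30) plus the fragments (8, 6), (10, 4),
 * (14, 8), and (14, 6).  The (0, 30) rmap overlaps the whole record,
 * so $seen = 1 and $target_nr = 2.  Step 2 pulls (8, 6) and (10, 4)
 * into the working set; both start at or before agbno 10.  In step 3
 * both fragments end at agbno 14, so both are discarded and two more
 * fragments, (14, 8) and (14, 6), are pulled; both start exactly at
 * agbno 14.  The fragment list is now empty and both remaining
 * fragments end at or beyond agbno 20, so the two fragment chains
 * plus the one full overlap account for all three owners of every
 * block in the record.
 */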
struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
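/*
 * In the example above, the (0, 30) rmap spans the entire record and
 * is redeemed immediately, while (8, 6) only covers part of it and is
 * stashed on the fragment list for the walk in
 * xchk_refcountbt_process_rmap_fragments() below.
 */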
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
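/*
 * A corrupt case (again with illustrative numbers): for a record
 * (bno 10, len 10, refcount 3) with $seen = 1, fragments (8, 6) and
 * (12, 4) cannot be correct, because only one fragment starts at or
 * before agbno 10; step 2 comes up short and the record is flagged.
 */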
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;
	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet.  Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start).  All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xchk_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;
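	/*
	 * Note that the rmapbt is an overlapping-interval btree, so the
	 * range query should also return rmaps that begin before bno but
	 * overlap the record, not just rmaps whose startblock falls
	 * within [bno, bno + len - 1].
	 */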

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	xfs_nlink_t		refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	xfs_agblock_t		*cow_blocks = bs->private;
	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;
	bool			has_cowflag;
	int			error = 0;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

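	/*
	 * CoW staging extents are stored in the refcountbt with
	 * XFS_REFC_COW_START (the high bit of the agbno keyspace) OR'd
	 * into rc_startblock, which is why the flag has to be masked
	 * off before the block number itself can be validated below.
	 */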
	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return error;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	struct xfs_owner_info	*oinfo,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	oinfo;
	xfs_agblock_t		cow_blocks = 0;
	int			error;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&oinfo, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
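	/*
	 * Staging extents are keyed in the refcountbt with
	 * XFS_REFC_COW_START OR'd into the startblock, so the flag must
	 * be included in the lookup key to land on the staging record.
	 */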
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in. */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			shared;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}