// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap.  Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xfs_bitmap_set(
	struct xfs_bitmap	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xfs_bitmap_range	*bmr;

	bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}
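
/*
 * Illustrative sketch, not taken from this file's real callers; the block
 * numbers are invented.  Recording two disjoint ranges is the logical
 * equivalent of bitmap |= mask(16, 4) followed by bitmap |= mask(32, 4),
 * and the caller is responsible for ensuring the ranges were not already
 * set:
 *
 *	struct xfs_bitmap	b;
 *	int			error;
 *
 *	xfs_bitmap_init(&b);
 *	error = xfs_bitmap_set(&b, 16, 4);	// marks [16, 20)
 *	if (!error)
 *		error = xfs_bitmap_set(&b, 32, 4);	// marks [32, 36)
 *	xfs_bitmap_destroy(&b);
 */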

/* Free everything related to this bitmap. */
void
xfs_bitmap_destroy(
	struct xfs_bitmap	*bitmap)
{
	struct xfs_bitmap_range	*bmr;
	struct xfs_bitmap_range	*n;

	for_each_xfs_bitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kmem_free(bmr);
	}
}

/* Set up a per-AG block bitmap. */
void
xfs_bitmap_init(
	struct xfs_bitmap	*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}

/* Compare two bitmap ranges by starting offset. */
static int
xfs_bitmap_range_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_bitmap_range	*ap;
	struct xfs_bitmap_range	*bp;

	ap = container_of(a, struct xfs_bitmap_range, list);
	bp = container_of(b, struct xfs_bitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents mentioned
 * in @sub from all the extents linked in @bitmap, which leaves @bitmap as
 * the list of blocks that are not accounted for; we assume these are the
 * dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xfs_bitmap_disunion(
	struct xfs_bitmap	*bitmap,
	struct xfs_bitmap	*sub)
{
	struct list_head	*lp;
	struct xfs_bitmap_range	*br;
	struct xfs_bitmap_range	*new_br;
	struct xfs_bitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;
	ASSERT(!list_empty(&sub->list));

	list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
	list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list.  We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub.  The
	 * list traversal is similar to merge sort, but we're deleting
	 * instead.  In this manner we avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xfs_bitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersects or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete the extent. */
			lp = lp->next;
			list_del(&br->list);
			kmem_free(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
					KM_MAYFAIL);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
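
/*
 * Worked example with invented block numbers, purely for illustration: say
 * the rmapbt claims the old structure owned blocks [16, 48), but blocks
 * [24, 32) turn out to still be in use by a structure that is not being
 * rebuilt.  Subtracting the in-use extent leaves [16, 24) and [32, 48) in
 * the bitmap, and those are the blocks that can be reaped:
 *
 *	struct xfs_bitmap	old_blocks;
 *	struct xfs_bitmap	in_use;
 *	int			error;
 *
 *	xfs_bitmap_init(&old_blocks);
 *	xfs_bitmap_init(&in_use);
 *	error = xfs_bitmap_set(&old_blocks, 16, 32);
 *	if (!error)
 *		error = xfs_bitmap_set(&in_use, 24, 8);
 *	if (!error)
 *		error = xfs_bitmap_disunion(&old_blocks, &in_use);
 *	// old_blocks now holds [16, 24) and [32, 48)
 *	xfs_bitmap_destroy(&in_use);
 *	xfs_bitmap_destroy(&old_blocks);
 */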

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree.  Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records.  For
 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
 * that we saw block 1.  Then we observe that bc_ptrs[1] == 1, so we record
 * block 4.  The list is [1, 4].
 *
 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
 * loop.  The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2.  Now
 * bc_ptrs[0] == 1 again, so we record that we saw block 2.  We see that
 * bc_ptrs[1] == 2, so we exit the loop.  The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.  bc_ptrs[0] == 1, so
 * we add 3 to the list.  Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor.  Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xfs_bitmap_set_btcur_path(
	struct xfs_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
		error = xfs_bitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}
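
/*
 * Hypothetical usage sketch; the callback name is invented for illustration.
 * A caller walking an rmap btree with xfs_rmap_query_all() could capture the
 * rmapbt's own blocks from inside its per-record callback:
 *
 *	STATIC int
 *	xfs_example_walk_rmap(
 *		struct xfs_btree_cur	*cur,
 *		struct xfs_rmap_irec	*rec,
 *		void			*priv)
 *	{
 *		struct xfs_bitmap	*bitmap = priv;
 *
 *		return xfs_bitmap_set_btcur_path(bitmap, cur);
 *	}
 */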

/* Collect a btree's block in the bitmap. */
STATIC int
xfs_bitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_bitmap	*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
	return xfs_bitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xfs_bitmap_set_btblocks(
	struct xfs_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
}
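
/*
 * Hypothetical usage sketch; the variable names are invented.  Given a
 * cursor for a btree whose blocks we want to account for, collect every
 * block of that tree into a bitmap:
 *
 *	error = xfs_bitmap_set_btblocks(&btree_blocks, cur);
 *	if (error)
 *		return error;
 */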