// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

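/*
 * compare two delayed ref nodes with the same bytenr.  Ordering is by type
 * first, then by the type-specific fields, and finally (when check_seq is
 * set) by sequence number, so that older refs sort first.
 */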
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/*
 * insert a new ref head into the rbtree.  Returns the existing head if one
 * is already present for the same bytenr, or NULL after a successful insert.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

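/*
 * insert a delayed ref node into a head's ref_tree, ordered by comp_refs().
 * Returns the existing node if an equal one (including seq) is already
 * present, or NULL after a successful insert.
 */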
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

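/*
 * Try to take the mutex on a delayed ref head without blocking.  If that
 * fails, drop delayed_refs->lock, sleep on the mutex and retake the lock.
 * Returns -EAGAIN if the head was removed from the rbtree in the meantime,
 * in which case the caller must start over.
 */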
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

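/*
 * remove a ref from its head: unlink it from the rbtree and the add_list,
 * drop its refcount and update the entry counter.  Callers must hold the
 * head's lock.
 */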
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

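/*
 * Try to merge 'ref' with the refs that follow it in the head's tree.
 * Refs at or above 'seq' must stay visible to the tree mod log and are
 * not merged.  Returns true if the caller's iteration must be restarted
 * because 'ref' itself was dropped or swapped.
 */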
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

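		/*
		 * 'ref' now carries the surviving action: fold the (possibly
		 * negated) count of 'next' into it and free 'next'.  If the
		 * counts cancel out completely, drop 'ref' as well.
		 */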
		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

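/*
 * Merge all mergeable refs of a head into as few nodes as possible.  Refs
 * at or above the lowest tree mod log sequence still in use are left alone
 * so that backref walking sees a consistent view.
 */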
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	read_lock(&fs_info->tree_mod_log_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	read_unlock(&fs_info->tree_mod_log_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

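/*
 * Return 1 if the given seq is at or above the lowest sequence in the tree
 * mod log, meaning the matching delayed refs must be held back, 0 otherwise.
 */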
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	read_lock(&fs_info->tree_mod_log_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	read_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}

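/*
 * Select a delayed ref head for processing, starting at run_delayed_start
 * and wrapping around to the beginning of the rbtree once, skipping heads
 * that are already being processed.  Returns NULL when no head is available.
 */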
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node into the head's ref tree, or to merge it
 * with an existing node for the same reference.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing node if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * if the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}

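/*
 * Initialize a freshly allocated delayed ref head and, if given, the qgroup
 * extent record that will track it.  The ref_mod delta for this operation
 * is +1 for an add, -1 for a drop and 0 for a head update.
 */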
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(qrecord && head_ref->qgroup_ref_root
			&& head_ref->qgroup_reserved
			&& existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (head_ref->is_data && head_ref->ref_mod < 0)
			delayed_refs->pending_csums += head_ref->num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	int ret;
	u8 ref_type;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int ret;
	u8 ref_type;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

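/*
 * Record a delayed modification (flags and/or key update) for an extent
 * item without adding or dropping any references to it.
 */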
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

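/*
 * Destroy the slab caches.  kmem_cache_destroy() accepts NULL, so this is
 * also safe to call from the error path of btrfs_delayed_ref_init().
 */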
void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

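/*
 * Create the slab caches used for delayed ref tracking; on any failure all
 * caches created so far are torn down again.
 */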
int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}