// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"

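/*
 * Thresholds on the number of pending delayed items (delayed_root->items):
 * once the backlog exceeds BTRFS_DELAYED_BACKGROUND, background flushing is
 * kicked off; once it exceeds BTRFS_DELAYED_WRITEBACK, callers of
 * btrfs_balance_delayed_items() also block until the backlog shrinks; and
 * throttled waiters are woken at most once per BTRFS_DELAYED_BATCH
 * completed items.
 */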
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

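/*
 * Two delayed items are "continuous" when both are dir index items of the
 * same directory with consecutive index offsets; such runs can be inserted
 * into or deleted from a single leaf in one batch.
 */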
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

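	/*
	 * Preallocate radix tree nodes so that the insertion below cannot
	 * fail with -ENOMEM while we hold root->inode_lock; paired with
	 * radix_tree_preload_end() on all paths.
	 */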
	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

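/*
 * Account one completed delayed item: bump the completion sequence counter
 * and wake up throttled waiters when the backlog drops below the background
 * threshold or once per BTRFS_DELAYED_BATCH completions.
 */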
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

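/*
 * Migrate the metadata reservation for one tree item from the transaction's
 * block reserve to the global delayed_block_rsv, so that the delayed item
 * can still be flushed into the btree after this transaction handle is gone.
 */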
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So there is no
	 * need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space, for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, but that might cause the task to
	 * sleep, so we set all the locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an existing
 * item with new data, such as directory name index insertion and inode
 * insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * We insert an item first, and then, if there are continuous items after it,
 * we try to insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

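/*
 * Delete a run of continuous dir index items from the leaf that @path points
 * to, removing them from the btree with one btrfs_del_items() call and then
 * releasing the matching delayed items and their metadata reservations.
 */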
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * We can't find the item which this delayed item points to,
		 * so the delayed item is invalid; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

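/*
 * Copy the in-memory inode item of the delayed node back into the btree and,
 * if BTRFS_DELAYED_NODE_DEL_IREF is set, also delete the inode's single
 * INODE_REF/INODE_EXTREF item while the path is still held.
 */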
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		/*
		 * See the comment below about releasing path before releasing
		 * node. If the commit of delayed items was successful the path
		 * should always be released, but in case of an error, it may
		 * point to locked extent buffers (a leaf at the very least).
		 */
		ASSERT(path->nodes[0] == NULL);
		btrfs_release_delayed_node(prev_node);
	}

	/*
	 * Release the path to avoid a potential deadlock and lockdep splat when
	 * releasing the delayed node, as that requires taking the delayed node's
	 * mutex. If another task starts running delayed items before we take
	 * the mutex, it will first lock the mutex and then it may try to lock
	 * the same btree path (leaf).
	 */
	btrfs_free_path(path);

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

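/*
 * Work queue handler: flush prepared delayed nodes one at a time, joining a
 * transaction for each node, until the backlog drops below half of
 * BTRFS_DELAYED_BACKGROUND or the requested number of nodes has been done.
 */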
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

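/*
 * A throttled waiter may stop waiting once at least BTRFS_DELAYED_BATCH items
 * have completed since it sampled items_seq (val < seq covers the counter
 * wrapping around), or once the backlog has fallen below the background
 * threshold.
 */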
could_end_wait(struct btrfs_delayed_root * delayed_root,int seq)1411 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1412 {
1413 int val = atomic_read(&delayed_root->items_seq);
1414
1415 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1416 return 1;
1417
1418 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1419 return 1;
1420
1421 return 0;
1422 }
1423
btrfs_balance_delayed_items(struct btrfs_fs_info * fs_info)1424 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1425 {
1426 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1427
1428 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1429 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1430 return;
1431
1432 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1433 int seq;
1434 int ret;
1435
1436 seq = atomic_read(&delayed_root->items_seq);
1437
1438 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1439 if (ret)
1440 return;
1441
1442 wait_event_interruptible(delayed_root->wait,
1443 could_end_wait(delayed_root, seq));
1444 return;
1445 }
1446
1447 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1448 }
1449
1450 /* Will return 0 or -ENOMEM */
btrfs_insert_delayed_dir_index(struct btrfs_trans_handle * trans,const char * name,int name_len,struct btrfs_inode * dir,struct btrfs_disk_key * disk_key,u8 type,u64 index)1451 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1452 const char *name, int name_len,
1453 struct btrfs_inode *dir,
1454 struct btrfs_disk_key *disk_key, u8 type,
1455 u64 index)
1456 {
1457 struct btrfs_delayed_node *delayed_node;
1458 struct btrfs_delayed_item *delayed_item;
1459 struct btrfs_dir_item *dir_item;
1460 int ret;
1461
1462 delayed_node = btrfs_get_or_create_delayed_node(dir);
1463 if (IS_ERR(delayed_node))
1464 return PTR_ERR(delayed_node);
1465
1466 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1467 if (!delayed_item) {
1468 ret = -ENOMEM;
1469 goto release_node;
1470 }
1471
1472 delayed_item->key.objectid = btrfs_ino(dir);
1473 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1474 delayed_item->key.offset = index;
1475
1476 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1477 dir_item->location = *disk_key;
1478 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1479 btrfs_set_stack_dir_data_len(dir_item, 0);
1480 btrfs_set_stack_dir_name_len(dir_item, name_len);
1481 btrfs_set_stack_dir_type(dir_item, type);
1482 memcpy((char *)(dir_item + 1), name, name_len);
1483
1484 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1485 /*
1486 * we have reserved enough space when we start a new transaction,
1487 * so reserving metadata failure is impossible
1488 */
1489 BUG_ON(ret);
1490
1491 mutex_lock(&delayed_node->mutex);
1492 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1493 if (unlikely(ret)) {
1494 btrfs_err(trans->fs_info,
1495 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1496 name_len, name, delayed_node->root->objectid,
1497 delayed_node->inode_id, ret);
1498 BUG();
1499 }
1500 mutex_unlock(&delayed_node->mutex);
1501
1502 release_node:
1503 btrfs_release_delayed_node(delayed_node);
1504 return ret;
1505 }
1506
btrfs_delete_delayed_insertion_item(struct btrfs_fs_info * fs_info,struct btrfs_delayed_node * node,struct btrfs_key * key)1507 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1508 struct btrfs_delayed_node *node,
1509 struct btrfs_key *key)
1510 {
1511 struct btrfs_delayed_item *item;
1512
1513 mutex_lock(&node->mutex);
1514 item = __btrfs_lookup_delayed_insertion_item(node, key);
1515 if (!item) {
1516 mutex_unlock(&node->mutex);
1517 return 1;
1518 }
1519
1520 btrfs_delayed_item_release_metadata(node->root, item);
1521 btrfs_release_delayed_item(item);
1522 mutex_unlock(&node->mutex);
1523 return 0;
1524 }
1525
btrfs_delete_delayed_dir_index(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,u64 index)1526 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1527 struct btrfs_inode *dir, u64 index)
1528 {
1529 struct btrfs_delayed_node *node;
1530 struct btrfs_delayed_item *item;
1531 struct btrfs_key item_key;
1532 int ret;
1533
1534 node = btrfs_get_or_create_delayed_node(dir);
1535 if (IS_ERR(node))
1536 return PTR_ERR(node);
1537
1538 item_key.objectid = btrfs_ino(dir);
1539 item_key.type = BTRFS_DIR_INDEX_KEY;
1540 item_key.offset = index;
1541
1542 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1543 &item_key);
1544 if (!ret)
1545 goto end;
1546
1547 item = btrfs_alloc_delayed_item(0);
1548 if (!item) {
1549 ret = -ENOMEM;
1550 goto end;
1551 }
1552
1553 item->key = item_key;
1554
1555 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1556 /*
1557 * we have reserved enough space when we start a new transaction,
1558 * so reserving metadata failure is impossible.
1559 */
1560 BUG_ON(ret);
1561
1562 mutex_lock(&node->mutex);
1563 ret = __btrfs_add_delayed_deletion_item(node, item);
1564 if (unlikely(ret)) {
1565 btrfs_err(trans->fs_info,
1566 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1567 index, node->root->objectid, node->inode_id, ret);
1568 BUG();
1569 }
1570 mutex_unlock(&node->mutex);
1571 end:
1572 btrfs_release_delayed_node(node);
1573 return ret;
1574 }
1575
btrfs_inode_delayed_dir_index_count(struct btrfs_inode * inode)1576 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1577 {
1578 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1579
1580 if (!delayed_node)
1581 return -ENOENT;
1582
1583 /*
1584 * Since we have held i_mutex of this directory, it is impossible that
1585 * a new directory index is added into the delayed node and index_cnt
1586 * is updated now. So we needn't lock the delayed node.
1587 */
1588 if (!delayed_node->index_cnt) {
1589 btrfs_release_delayed_node(delayed_node);
1590 return -EINVAL;
1591 }
1592
1593 inode->index_cnt = delayed_node->index_cnt;
1594 btrfs_release_delayed_node(delayed_node);
1595 return 0;
1596 }
1597
btrfs_readdir_get_delayed_items(struct inode * inode,struct list_head * ins_list,struct list_head * del_list)1598 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1599 struct list_head *ins_list,
1600 struct list_head *del_list)
1601 {
1602 struct btrfs_delayed_node *delayed_node;
1603 struct btrfs_delayed_item *item;
1604
1605 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1606 if (!delayed_node)
1607 return false;
1608
1609 /*
1610 * We can only do one readdir with delayed items at a time because of
1611 * item->readdir_list.
1612 */
1613 inode_unlock_shared(inode);
1614 inode_lock(inode);
1615
1616 mutex_lock(&delayed_node->mutex);
1617 item = __btrfs_first_delayed_insertion_item(delayed_node);
1618 while (item) {
1619 refcount_inc(&item->refs);
1620 list_add_tail(&item->readdir_list, ins_list);
1621 item = __btrfs_next_delayed_item(item);
1622 }
1623
1624 item = __btrfs_first_delayed_deletion_item(delayed_node);
1625 while (item) {
1626 refcount_inc(&item->refs);
1627 list_add_tail(&item->readdir_list, del_list);
1628 item = __btrfs_next_delayed_item(item);
1629 }
1630 mutex_unlock(&delayed_node->mutex);
1631 /*
1632 * This delayed node is still cached in the btrfs inode, so refs
1633 * must be > 1 now, and we needn't check it is going to be freed
1634 * or not.
1635 *
1636 * Besides that, this function is used to read dir, we do not
1637 * insert/delete delayed items in this period. So we also needn't
1638 * requeue or dequeue this delayed node.
1639 */
1640 refcount_dec(&delayed_node->refs);
1641
1642 return true;
1643 }
1644
btrfs_readdir_put_delayed_items(struct inode * inode,struct list_head * ins_list,struct list_head * del_list)1645 void btrfs_readdir_put_delayed_items(struct inode *inode,
1646 struct list_head *ins_list,
1647 struct list_head *del_list)
1648 {
1649 struct btrfs_delayed_item *curr, *next;
1650
1651 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1652 list_del(&curr->readdir_list);
1653 if (refcount_dec_and_test(&curr->refs))
1654 kfree(curr);
1655 }
1656
1657 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1658 list_del(&curr->readdir_list);
1659 if (refcount_dec_and_test(&curr->refs))
1660 kfree(curr);
1661 }
1662
1663 /*
1664 * The VFS is going to do up_read(), so we need to downgrade back to a
1665 * read lock.
1666 */
1667 downgrade_write(&inode->i_rwsem);
1668 }
1669
btrfs_should_delete_dir_index(struct list_head * del_list,u64 index)1670 int btrfs_should_delete_dir_index(struct list_head *del_list,
1671 u64 index)
1672 {
1673 struct btrfs_delayed_item *curr;
1674 int ret = 0;
1675
1676 list_for_each_entry(curr, del_list, readdir_list) {
1677 if (curr->key.offset > index)
1678 break;
1679 if (curr->key.offset == index) {
1680 ret = 1;
1681 break;
1682 }
1683 }
1684 return ret;
1685 }
1686
1687 /*
1688 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1689 *
1690 */
btrfs_readdir_delayed_dir_index(struct dir_context * ctx,struct list_head * ins_list)1691 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1692 struct list_head *ins_list)
1693 {
1694 struct btrfs_dir_item *di;
1695 struct btrfs_delayed_item *curr, *next;
1696 struct btrfs_key location;
1697 char *name;
1698 int name_len;
1699 int over = 0;
1700 unsigned char d_type;
1701
1702 if (list_empty(ins_list))
1703 return 0;
1704
1705 /*
1706 * Changing the data of the delayed item is impossible. So
1707 * we needn't lock them. And we have held i_mutex of the
1708 * directory, nobody can delete any directory indexes now.
1709 */
1710 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1711 list_del(&curr->readdir_list);
1712
1713 if (curr->key.offset < ctx->pos) {
1714 if (refcount_dec_and_test(&curr->refs))
1715 kfree(curr);
1716 continue;
1717 }
1718
1719 ctx->pos = curr->key.offset;
1720
1721 di = (struct btrfs_dir_item *)curr->data;
1722 name = (char *)(di + 1);
1723 name_len = btrfs_stack_dir_name_len(di);
1724
1725 d_type = btrfs_filetype_table[di->type];
1726 btrfs_disk_key_to_cpu(&location, &di->location);
1727
1728 over = !dir_emit(ctx, name, name_len,
1729 location.objectid, d_type);
1730
1731 if (refcount_dec_and_test(&curr->refs))
1732 kfree(curr);
1733
1734 if (over)
1735 return 1;
1736 ctx->pos++;
1737 }
1738 return 0;
1739 }
1740
fill_stack_inode_item(struct btrfs_trans_handle * trans,struct btrfs_inode_item * inode_item,struct inode * inode)1741 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1742 struct btrfs_inode_item *inode_item,
1743 struct inode *inode)
1744 {
1745 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1746 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1747 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1748 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1749 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1750 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1751 btrfs_set_stack_inode_generation(inode_item,
1752 BTRFS_I(inode)->generation);
1753 btrfs_set_stack_inode_sequence(inode_item,
1754 inode_peek_iversion(inode));
1755 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1756 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1757 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1758 btrfs_set_stack_inode_block_group(inode_item, 0);
1759
1760 btrfs_set_stack_timespec_sec(&inode_item->atime,
1761 inode->i_atime.tv_sec);
1762 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1763 inode->i_atime.tv_nsec);
1764
1765 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1766 inode->i_mtime.tv_sec);
1767 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1768 inode->i_mtime.tv_nsec);
1769
1770 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1771 inode->i_ctime.tv_sec);
1772 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1773 inode->i_ctime.tv_nsec);
1774
1775 btrfs_set_stack_timespec_sec(&inode_item->otime,
1776 BTRFS_I(inode)->i_otime.tv_sec);
1777 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1778 BTRFS_I(inode)->i_otime.tv_nsec);
1779 }
1780
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
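	/*
	 * Force the next directory index allocation to look up the
	 * current highest index instead of trusting a stale counter.
	 */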
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

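/*
 * Cache an inode update in the inode's delayed node instead of cowing
 * the fs tree leaf right away.  Metadata space is reserved only when
 * the node first becomes dirty; later updates in the same window just
 * refresh the cached item.
 */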
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

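/*
 * Queue the deletion of the inode's single INODE_REF item so that it
 * can be done when the delayed node is run, together with the inode
 * item update.
 */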
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to ENOSPC problems.  This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for inodes which have only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases the inode ref and the inode item are in the same
	 *   leaf, so we deal with them at the same time.  Since we are sure
	 *   we will reserve space for the inode item, it is unnecessary to
	 *   reserve space for the inode ref deletion as well.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we still needn't worry about ENOSPC, because we reserve much
	 *   more space for the inode update than it needs.
	 * - At worst, we can steal some space from the global reservation,
	 *   but that is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

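/*
 * Throw away everything queued on a delayed node: pending insertions,
 * pending deletions, a queued inode ref deletion and a dirty inode
 * item.  The metadata space reserved for them is given back and nothing
 * is written to the fs tree.
 */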
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
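	/* Release every queued insertion item and its reservation. */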
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

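	/* Do the same for the queued deletion items. */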
	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

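/*
 * Discard all delayed items of one inode, e.g. when the inode is being
 * evicted and the queued work is no longer wanted.
 */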
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

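/*
 * Walk the root's radix tree in batches of up to 8 nodes and discard
 * every delayed node, for example when the subvolume itself is being
 * deleted and none of its pending work needs to be applied.
 */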
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

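		/*
		 * Start the next lookup after the last node we saw so
		 * that each pass makes forward progress.
		 */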
		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

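/*
 * Discard every delayed node in the filesystem; used on cleanup paths
 * (e.g. when a transaction is aborted) where the queued updates can no
 * longer be applied.
 */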
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}