Lines Matching +full:root +full:- +full:node

1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
14 #include "disk-io.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
22 #include "print-tree.h"
23 #include "delalloc-space.h"
24 #include "block-group.h"
40 * ------------------------------------------------------------------
47 * 1. Mark the target block group read-only
78 * map address of tree root to tree
131 /* map start of tree root to corresponding reloc tree */
158 struct btrfs_backref_node *node) in mark_block_processed() argument
162 if (node->level == 0 || in mark_block_processed()
163 in_range(node->bytenr, rc->block_group->start, in mark_block_processed()
164 rc->block_group->length)) { in mark_block_processed()
165 blocksize = rc->extent_root->fs_info->nodesize; in mark_block_processed()
166 set_extent_bits(&rc->processed_blocks, node->bytenr, in mark_block_processed()
167 node->bytenr + blocksize - 1, EXTENT_DIRTY); in mark_block_processed()
169 node->processed = 1; in mark_block_processed()
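
mark_block_processed() above records each relocated tree block (or any block inside the target block group) as a nodesize-wide EXTENT_DIRTY range in rc->processed_blocks, which tree_block_processed() (line 2584 onwards below) later tests before queueing a block again. A minimal userspace sketch of that bookkeeping, assuming a single block group and using a plain bitmap in place of the kernel's extent_io_tree; the level == 0 special case is not modelled and all names here are invented for the illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct processed_map {
	uint64_t bg_start;   /* block group start, bytes */
	uint64_t bg_length;  /* block group length, bytes */
	uint32_t nodesize;   /* tree block size, bytes */
	uint8_t *bits;       /* one bit per tree block in the group */
};

static bool in_bg(const struct processed_map *m, uint64_t bytenr)
{
	return bytenr >= m->bg_start && bytenr < m->bg_start + m->bg_length;
}

/* Counterpart of mark_block_processed(): remember the block as done. */
static void mark_processed(struct processed_map *m, uint64_t bytenr)
{
	uint64_t idx = (bytenr - m->bg_start) / m->nodesize;

	if (in_bg(m, bytenr))
		m->bits[idx / 8] |= 1u << (idx % 8);
}

/* Counterpart of tree_block_processed(): was the block done already? */
static bool processed(const struct processed_map *m, uint64_t bytenr)
{
	uint64_t idx = (bytenr - m->bg_start) / m->nodesize;

	return in_bg(m, bytenr) && (m->bits[idx / 8] & (1u << (idx % 8)));
}

int main(void)
{
	struct processed_map m = { .bg_start = 1 << 20, .bg_length = 1 << 26,
				   .nodesize = 16384 };

	m.bits = calloc(m.bg_length / m.nodesize / 8 + 1, 1);
	mark_processed(&m, m.bg_start + 5 * m.nodesize);
	printf("%d %d\n", processed(&m, m.bg_start + 5 * m.nodesize),
	       processed(&m, m.bg_start + 6 * m.nodesize));   /* prints: 1 0 */
	free(m.bits);
	return 0;
}
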
175 tree->rb_root = RB_ROOT; in mapping_tree_init()
176 spin_lock_init(&tree->lock); in mapping_tree_init()
180 * walk up backref nodes until we reach the node that represents the tree root
183 struct btrfs_backref_node *node, in walk_up_backref() argument
189 while (!list_empty(&node->upper)) { in walk_up_backref()
190 edge = list_entry(node->upper.next, in walk_up_backref()
193 node = edge->node[UPPER]; in walk_up_backref()
195 BUG_ON(node->detached); in walk_up_backref()
197 return node; in walk_up_backref()
211 edge = edges[idx - 1]; in walk_down_backref()
212 lower = edge->node[LOWER]; in walk_down_backref()
213 if (list_is_last(&edge->list[LOWER], &lower->upper)) { in walk_down_backref()
214 idx--; in walk_down_backref()
217 edge = list_entry(edge->list[LOWER].next, in walk_down_backref()
219 edges[idx - 1] = edge; in walk_down_backref()
221 return edge->node[UPPER]; in walk_down_backref()
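
walk_up_backref() and walk_down_backref() above implement a manual depth-first traversal over the backref graph: walking up always follows the first "upper" edge until a node with no parents (a tree root) is found, and walking down backtracks along the recorded edge stack to the next unvisited sibling edge. A compact sketch of that pattern, assuming small fixed-size arrays instead of the kernel's list_head edge lists; all types and names below are invented for the illustration.

#include <stdio.h>

#define MAX_UPPER 4
#define MAX_DEPTH 8

struct bnode {
	const char *name;
	int nr_upper;
	struct bnode *upper[MAX_UPPER]; /* referencing (parent) blocks */
};

struct bedge {
	struct bnode *lower;
	struct bnode *upper;
	int slot;                       /* which parent of 'lower' was taken */
};

/* Follow the first upper edge at every level until a root is reached. */
static struct bnode *walk_up(struct bnode *node, struct bedge *edges, int *index)
{
	while (node->nr_upper) {
		edges[*index].lower = node;
		edges[*index].slot = 0;
		node = node->upper[0];
		edges[(*index)++].upper = node;
	}
	return node;
}

/* Backtrack to the deepest edge that still has an unvisited sibling. */
static struct bnode *walk_down(struct bedge *edges, int *index)
{
	while (*index > 0) {
		struct bedge *edge = &edges[*index - 1];

		if (edge->slot + 1 >= edge->lower->nr_upper) {
			(*index)--;
			continue;
		}
		edge->slot++;
		edge->upper = edge->lower->upper[edge->slot];
		return edge->upper;
	}
	return NULL;
}

int main(void)
{
	struct bnode root1 = { "root1" }, root2 = { "root2" };
	struct bnode mid = { "mid", 2, { &root1, &root2 } };
	struct bnode leaf = { "leaf", 1, { &mid } };
	struct bedge edges[MAX_DEPTH];
	int index = 0;
	struct bnode *cur = &leaf;

	while (cur) {
		printf("path ends at %s\n", walk_up(cur, edges, &index)->name);
		cur = walk_down(edges, &index);
	}
	return 0;
}

Run against the tiny leaf -> mid -> {root1, root2} graph above, this visits both root paths, which is what select_reloc_root()/select_one_root() further down rely on.
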
228 struct btrfs_backref_node *node, u64 bytenr) in update_backref_node() argument
231 rb_erase(&node->rb_node, &cache->rb_root); in update_backref_node()
232 node->bytenr = bytenr; in update_backref_node()
233 rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node); in update_backref_node()
235 btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST); in update_backref_node()
244 struct btrfs_backref_node *node; in update_backref_cache() local
247 if (cache->last_trans == 0) { in update_backref_cache()
248 cache->last_trans = trans->transid; in update_backref_cache()
252 if (cache->last_trans == trans->transid) in update_backref_cache()
260 while (!list_empty(&cache->detached)) { in update_backref_cache()
261 node = list_entry(cache->detached.next, in update_backref_cache()
263 btrfs_backref_cleanup_node(cache, node); in update_backref_cache()
266 while (!list_empty(&cache->changed)) { in update_backref_cache()
267 node = list_entry(cache->changed.next, in update_backref_cache()
269 list_del_init(&node->list); in update_backref_cache()
270 BUG_ON(node->pending); in update_backref_cache()
271 update_backref_node(cache, node, node->new_bytenr); in update_backref_cache()
279 list_for_each_entry(node, &cache->pending[level], list) { in update_backref_cache()
280 BUG_ON(!node->pending); in update_backref_cache()
281 if (node->bytenr == node->new_bytenr) in update_backref_cache()
283 update_backref_node(cache, node, node->new_bytenr); in update_backref_cache()
287 cache->last_trans = 0; in update_backref_cache()
291 static bool reloc_root_is_dead(struct btrfs_root *root) in reloc_root_is_dead() argument
299 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)) in reloc_root_is_dead()
308 * This is enough for most callers, as they don't distinguish dead reloc root
309 * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
312 static bool have_reloc_root(struct btrfs_root *root) in have_reloc_root() argument
314 if (reloc_root_is_dead(root)) in have_reloc_root()
316 if (!root->reloc_root) in have_reloc_root()
321 int btrfs_should_ignore_reloc_root(struct btrfs_root *root) in btrfs_should_ignore_reloc_root() argument
325 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_should_ignore_reloc_root()
328 /* This root has been merged with its reloc tree, we can ignore it */ in btrfs_should_ignore_reloc_root()
329 if (reloc_root_is_dead(root)) in btrfs_should_ignore_reloc_root()
332 reloc_root = root->reloc_root; in btrfs_should_ignore_reloc_root()
336 if (btrfs_header_generation(reloc_root->commit_root) == in btrfs_should_ignore_reloc_root()
337 root->fs_info->running_transaction->transid) in btrfs_should_ignore_reloc_root()
342 * so backref node for the fs tree root is useless for in btrfs_should_ignore_reloc_root()
349 * find reloc tree by address of tree root
353 struct reloc_control *rc = fs_info->reloc_ctl; in find_reloc_root()
355 struct mapping_node *node; in find_reloc_root() local
356 struct btrfs_root *root = NULL; in find_reloc_root() local
359 spin_lock(&rc->reloc_root_tree.lock); in find_reloc_root()
360 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr); in find_reloc_root()
362 node = rb_entry(rb_node, struct mapping_node, rb_node); in find_reloc_root()
363 root = (struct btrfs_root *)node->data; in find_reloc_root()
365 spin_unlock(&rc->reloc_root_tree.lock); in find_reloc_root()
366 return btrfs_grab_root(root); in find_reloc_root()
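
find_reloc_root() above resolves the bytenr of a subvolume's tree root block to the relocation tree shadowing it, through the rc->reloc_root_tree mapping that __add_reloc_root(), __del_reloc_root() and __update_reloc_root() (further down) maintain. A rough userspace sketch of that lookup, assuming a sorted array plus bsearch() in place of the kernel's rb_simple_search() rbtree, and ignoring the spinlock and root refcounting; the names are invented for the illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping_entry {
	uint64_t bytenr;   /* start of the subvolume's commit root block */
	void *reloc_root;  /* the relocation tree created for that root */
};

static int cmp_bytenr(const void *key, const void *elem)
{
	uint64_t k = *(const uint64_t *)key;
	uint64_t b = ((const struct mapping_entry *)elem)->bytenr;

	return (k > b) - (k < b);
}

static void *find_reloc_root_sketch(struct mapping_entry *tbl, size_t n,
				    uint64_t bytenr)
{
	struct mapping_entry *e;

	e = bsearch(&bytenr, tbl, n, sizeof(*tbl), cmp_bytenr);
	return e ? e->reloc_root : NULL;
}

int main(void)
{
	int a, b;
	struct mapping_entry tbl[] = {      /* must stay sorted by bytenr */
		{ 30408704, &a },
		{ 31457280, &b },
	};

	printf("%p\n", find_reloc_root_sketch(tbl, 2, 31457280)); /* &b   */
	printf("%p\n", find_reloc_root_sketch(tbl, 2, 12345));    /* NULL */
	return 0;
}
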
372 * - Clean up the child edges and nodes
373 * If a child node is also orphaned (no parent) during cleanup, then the child
374 * node will be cleaned up as well.
376 * - Free up leaves (level 0), keeping interior nodes detached
377 * Interior nodes are still cached, marked as "detached"
379 * Return false if @node is not in the @useless_nodes list.
380 * Return true if @node is in the @useless_nodes list.
383 struct btrfs_backref_node *node) in handle_useless_nodes() argument
385 struct btrfs_backref_cache *cache = &rc->backref_cache; in handle_useless_nodes()
386 struct list_head *useless_node = &cache->useless_node; in handle_useless_nodes()
394 list_del_init(&cur->list); in handle_useless_nodes()
396 /* Only tree root nodes can be added to @useless_nodes */ in handle_useless_nodes()
397 ASSERT(list_empty(&cur->upper)); in handle_useless_nodes()
399 if (cur == node) in handle_useless_nodes()
402 /* The node is the lowest node */ in handle_useless_nodes()
403 if (cur->lowest) { in handle_useless_nodes()
404 list_del_init(&cur->lower); in handle_useless_nodes()
405 cur->lowest = 0; in handle_useless_nodes()
409 while (!list_empty(&cur->lower)) { in handle_useless_nodes()
413 edge = list_entry(cur->lower.next, in handle_useless_nodes()
415 list_del(&edge->list[UPPER]); in handle_useless_nodes()
416 list_del(&edge->list[LOWER]); in handle_useless_nodes()
417 lower = edge->node[LOWER]; in handle_useless_nodes()
420 /* Child node is also orphan, queue for cleanup */ in handle_useless_nodes()
421 if (list_empty(&lower->upper)) in handle_useless_nodes()
422 list_add(&lower->list, useless_node); in handle_useless_nodes()
432 if (cur->level > 0) { in handle_useless_nodes()
433 list_add(&cur->list, &cache->detached); in handle_useless_nodes()
434 cur->detached = 1; in handle_useless_nodes()
436 rb_erase(&cur->rb_node, &cache->rb_root); in handle_useless_nodes()
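
handle_useless_nodes() above drops nodes that turned out not to lead anywhere useful: each useless node has its lower edges detached, any child that thereby loses its last parent is queued as useless too, leaves are freed, and interior nodes stay in the cache as "detached". A small sketch of that cascading cleanup, assuming fixed-size arrays and a simple worklist instead of the kernel's list_head lists; names are invented.

#include <stdio.h>

#define MAX_CHILDREN 4
#define MAX_NODES    16

struct cnode {
	const char *name;
	int level;
	int nr_parents;                   /* upper edges still attached */
	int nr_children;
	struct cnode *child[MAX_CHILDREN];
};

static void cleanup_useless(struct cnode **worklist, int nr)
{
	while (nr) {
		struct cnode *cur = worklist[--nr];

		for (int i = 0; i < cur->nr_children; i++) {
			struct cnode *lower = cur->child[i];

			/* Drop the edge; a child with no parents left is useless too. */
			if (--lower->nr_parents == 0 && nr < MAX_NODES)
				worklist[nr++] = lower;
		}
		cur->nr_children = 0;

		if (cur->level > 0)
			printf("%s: kept detached\n", cur->name);  /* stays cached */
		else
			printf("%s: freed (leaf)\n", cur->name);
	}
}

int main(void)
{
	struct cnode leaf = { "leaf", 0, 1, 0, { 0 } };
	struct cnode mid  = { "mid",  1, 1, 1, { &leaf } };
	struct cnode root = { "root", 2, 0, 1, { &mid } };
	struct cnode *worklist[MAX_NODES] = { &root };

	cleanup_useless(worklist, 1);
	return 0;
}
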
444 * Build backref tree for a given tree block. Root of the backref tree
446 * b-trees that reference the tree block.
450 * these upper level blocks recursively. The recursion stops when tree root is
462 struct btrfs_backref_cache *cache = &rc->backref_cache; in build_backref_tree()
466 struct btrfs_backref_node *node = NULL; in build_backref_tree() local
471 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS); in build_backref_tree()
473 return ERR_PTR(-ENOMEM); in build_backref_tree()
476 err = -ENOMEM; in build_backref_tree()
480 node = btrfs_backref_alloc_node(cache, bytenr, level); in build_backref_tree()
481 if (!node) { in build_backref_tree()
482 err = -ENOMEM; in build_backref_tree()
486 node->lowest = 1; in build_backref_tree()
487 cur = node; in build_backref_tree()
489 /* Breadth-first search to build backref cache */ in build_backref_tree()
497 edge = list_first_entry_or_null(&cache->pending_edge, in build_backref_tree()
504 list_del_init(&edge->list[UPPER]); in build_backref_tree()
505 cur = edge->node[UPPER]; in build_backref_tree()
510 ret = btrfs_backref_finish_upper_links(cache, node); in build_backref_tree()
516 if (handle_useless_nodes(rc, node)) in build_backref_tree()
517 node = NULL; in build_backref_tree()
522 btrfs_backref_error_cleanup(cache, node); in build_backref_tree()
525 ASSERT(!node || !node->detached); in build_backref_tree()
526 ASSERT(list_empty(&cache->useless_node) && in build_backref_tree()
527 list_empty(&cache->pending_edge)); in build_backref_tree()
528 return node; in build_backref_tree()
532 * helper to add a backref node for the newly created snapshot.
533 * the backref node is created by cloning the backref node that
534 * corresponds to the root of the source tree
541 struct btrfs_root *reloc_root = src->reloc_root; in clone_backref_node()
542 struct btrfs_backref_cache *cache = &rc->backref_cache; in clone_backref_node()
543 struct btrfs_backref_node *node = NULL; in clone_backref_node() local
549 if (cache->last_trans > 0) in clone_backref_node()
552 rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start); in clone_backref_node()
554 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); in clone_backref_node()
555 if (node->detached) in clone_backref_node()
556 node = NULL; in clone_backref_node()
558 BUG_ON(node->new_bytenr != reloc_root->node->start); in clone_backref_node()
561 if (!node) { in clone_backref_node()
562 rb_node = rb_simple_search(&cache->rb_root, in clone_backref_node()
563 reloc_root->commit_root->start); in clone_backref_node()
565 node = rb_entry(rb_node, struct btrfs_backref_node, in clone_backref_node()
567 BUG_ON(node->detached); in clone_backref_node()
571 if (!node) in clone_backref_node()
574 new_node = btrfs_backref_alloc_node(cache, dest->node->start, in clone_backref_node()
575 node->level); in clone_backref_node()
577 return -ENOMEM; in clone_backref_node()
579 new_node->lowest = node->lowest; in clone_backref_node()
580 new_node->checked = 1; in clone_backref_node()
581 new_node->root = btrfs_grab_root(dest); in clone_backref_node()
582 ASSERT(new_node->root); in clone_backref_node()
584 if (!node->lowest) { in clone_backref_node()
585 list_for_each_entry(edge, &node->lower, list[UPPER]) { in clone_backref_node()
590 btrfs_backref_link_edge(new_edge, edge->node[LOWER], in clone_backref_node()
594 list_add_tail(&new_node->lower, &cache->leaves); in clone_backref_node()
597 rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr, in clone_backref_node()
598 &new_node->rb_node); in clone_backref_node()
600 btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST); in clone_backref_node()
602 if (!new_node->lowest) { in clone_backref_node()
603 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { in clone_backref_node()
604 list_add_tail(&new_edge->list[LOWER], in clone_backref_node()
605 &new_edge->node[LOWER]->upper); in clone_backref_node()
610 while (!list_empty(&new_node->lower)) { in clone_backref_node()
611 new_edge = list_entry(new_node->lower.next, in clone_backref_node()
613 list_del(&new_edge->list[UPPER]); in clone_backref_node()
617 return -ENOMEM; in clone_backref_node()
621 * helper to add 'address of tree root -> reloc tree' mapping
623 static int __must_check __add_reloc_root(struct btrfs_root *root) in __add_reloc_root() argument
625 struct btrfs_fs_info *fs_info = root->fs_info; in __add_reloc_root()
627 struct mapping_node *node; in __add_reloc_root() local
628 struct reloc_control *rc = fs_info->reloc_ctl; in __add_reloc_root()
630 node = kmalloc(sizeof(*node), GFP_NOFS); in __add_reloc_root()
631 if (!node) in __add_reloc_root()
632 return -ENOMEM; in __add_reloc_root()
634 node->bytenr = root->commit_root->start; in __add_reloc_root()
635 node->data = root; in __add_reloc_root()
637 spin_lock(&rc->reloc_root_tree.lock); in __add_reloc_root()
638 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, in __add_reloc_root()
639 node->bytenr, &node->rb_node); in __add_reloc_root()
640 spin_unlock(&rc->reloc_root_tree.lock); in __add_reloc_root()
643 "Duplicate root found for start=%llu while inserting into relocation tree", in __add_reloc_root()
644 node->bytenr); in __add_reloc_root()
645 return -EEXIST; in __add_reloc_root()
648 list_add_tail(&root->root_list, &rc->reloc_roots); in __add_reloc_root()
653 * helper to delete the 'address of tree root -> reloc tree'
656 static void __del_reloc_root(struct btrfs_root *root) in __del_reloc_root() argument
658 struct btrfs_fs_info *fs_info = root->fs_info; in __del_reloc_root()
660 struct mapping_node *node = NULL; in __del_reloc_root() local
661 struct reloc_control *rc = fs_info->reloc_ctl; in __del_reloc_root()
664 if (rc && root->node) { in __del_reloc_root()
665 spin_lock(&rc->reloc_root_tree.lock); in __del_reloc_root()
666 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, in __del_reloc_root()
667 root->commit_root->start); in __del_reloc_root()
669 node = rb_entry(rb_node, struct mapping_node, rb_node); in __del_reloc_root()
670 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); in __del_reloc_root()
671 RB_CLEAR_NODE(&node->rb_node); in __del_reloc_root()
673 spin_unlock(&rc->reloc_root_tree.lock); in __del_reloc_root()
674 ASSERT(!node || (struct btrfs_root *)node->data == root); in __del_reloc_root()
678 * We only put the reloc root here if it's on the list. There's a lot in __del_reloc_root()
679 * of places where the pattern is to splice the rc->reloc_roots, process in __del_reloc_root()
680 * the reloc roots, and then add the reloc root back onto in __del_reloc_root()
681 * rc->reloc_roots. If we call __del_reloc_root while it's off of the in __del_reloc_root()
685 spin_lock(&fs_info->trans_lock); in __del_reloc_root()
686 if (!list_empty(&root->root_list)) { in __del_reloc_root()
688 list_del_init(&root->root_list); in __del_reloc_root()
690 spin_unlock(&fs_info->trans_lock); in __del_reloc_root()
692 btrfs_put_root(root); in __del_reloc_root()
693 kfree(node); in __del_reloc_root()
697 * helper to update the 'address of tree root -> reloc tree'
700 static int __update_reloc_root(struct btrfs_root *root) in __update_reloc_root() argument
702 struct btrfs_fs_info *fs_info = root->fs_info; in __update_reloc_root()
704 struct mapping_node *node = NULL; in __update_reloc_root() local
705 struct reloc_control *rc = fs_info->reloc_ctl; in __update_reloc_root()
707 spin_lock(&rc->reloc_root_tree.lock); in __update_reloc_root()
708 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, in __update_reloc_root()
709 root->commit_root->start); in __update_reloc_root()
711 node = rb_entry(rb_node, struct mapping_node, rb_node); in __update_reloc_root()
712 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); in __update_reloc_root()
714 spin_unlock(&rc->reloc_root_tree.lock); in __update_reloc_root()
716 if (!node) in __update_reloc_root()
718 BUG_ON((struct btrfs_root *)node->data != root); in __update_reloc_root()
720 spin_lock(&rc->reloc_root_tree.lock); in __update_reloc_root()
721 node->bytenr = root->node->start; in __update_reloc_root()
722 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, in __update_reloc_root()
723 node->bytenr, &node->rb_node); in __update_reloc_root()
724 spin_unlock(&rc->reloc_root_tree.lock); in __update_reloc_root()
726 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); in __update_reloc_root()
731 struct btrfs_root *root, u64 objectid) in create_reloc_root() argument
733 struct btrfs_fs_info *fs_info = root->fs_info; in create_reloc_root()
743 return ERR_PTR(-ENOMEM); in create_reloc_root()
749 if (root->root_key.objectid == objectid) { in create_reloc_root()
753 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, in create_reloc_root()
760 * root - like this ctree.c:btrfs_block_can_be_shared() behaves in create_reloc_root()
761 * correctly (returns true) when the relocation root is created in create_reloc_root()
766 commit_root_gen = btrfs_header_generation(root->commit_root); in create_reloc_root()
767 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen); in create_reloc_root()
776 ret = btrfs_copy_root(trans, root, root->node, &eb, in create_reloc_root()
788 memcpy(root_item, &root->root_item, sizeof(*root_item)); in create_reloc_root()
789 btrfs_set_root_bytenr(root_item, eb->start); in create_reloc_root()
791 btrfs_set_root_generation(root_item, trans->transid); in create_reloc_root()
793 if (root->root_key.objectid == objectid) { in create_reloc_root()
795 memset(&root_item->drop_progress, 0, in create_reloc_root()
803 ret = btrfs_insert_root(trans, fs_info->tree_root, in create_reloc_root()
810 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key); in create_reloc_root()
815 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); in create_reloc_root()
816 reloc_root->last_trans = trans->transid; in create_reloc_root()
828 * snapshot of the fs tree with special root objectid.
831 * root->reloc_root, and another for being on the rc->reloc_roots list.
834 struct btrfs_root *root) in btrfs_init_reloc_root() argument
836 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_init_reloc_root()
838 struct reloc_control *rc = fs_info->reloc_ctl; in btrfs_init_reloc_root()
850 if (reloc_root_is_dead(root)) in btrfs_init_reloc_root()
856 * corresponding fs root, and then here we update the last trans for the in btrfs_init_reloc_root()
857 * reloc root. This means that we have to do this for the entire life in btrfs_init_reloc_root()
858 * of the reloc root, regardless of which stage of the relocation we are in btrfs_init_reloc_root()
861 if (root->reloc_root) { in btrfs_init_reloc_root()
862 reloc_root = root->reloc_root; in btrfs_init_reloc_root()
863 reloc_root->last_trans = trans->transid; in btrfs_init_reloc_root()
871 if (!rc->create_reloc_tree || in btrfs_init_reloc_root()
872 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) in btrfs_init_reloc_root()
875 if (!trans->reloc_reserved) { in btrfs_init_reloc_root()
876 rsv = trans->block_rsv; in btrfs_init_reloc_root()
877 trans->block_rsv = rc->block_rsv; in btrfs_init_reloc_root()
880 reloc_root = create_reloc_root(trans, root, root->root_key.objectid); in btrfs_init_reloc_root()
882 trans->block_rsv = rsv; in btrfs_init_reloc_root()
887 ASSERT(ret != -EEXIST); in btrfs_init_reloc_root()
893 root->reloc_root = btrfs_grab_root(reloc_root); in btrfs_init_reloc_root()
898 * update root item of reloc tree
901 struct btrfs_root *root) in btrfs_update_reloc_root() argument
903 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_update_reloc_root()
908 if (!have_reloc_root(root)) in btrfs_update_reloc_root()
911 reloc_root = root->reloc_root; in btrfs_update_reloc_root()
912 root_item = &reloc_root->root_item; in btrfs_update_reloc_root()
916 * the root. We have the ref for root->reloc_root, but just in case in btrfs_update_reloc_root()
917 * hold it while we update the reloc root. in btrfs_update_reloc_root()
921 /* root->reloc_root will stay until current relocation finished */ in btrfs_update_reloc_root()
922 if (fs_info->reloc_ctl->merge_reloc_tree && in btrfs_update_reloc_root()
924 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); in btrfs_update_reloc_root()
933 if (reloc_root->commit_root != reloc_root->node) { in btrfs_update_reloc_root()
935 btrfs_set_root_node(root_item, reloc_root->node); in btrfs_update_reloc_root()
936 free_extent_buffer(reloc_root->commit_root); in btrfs_update_reloc_root()
937 reloc_root->commit_root = btrfs_root_node(reloc_root); in btrfs_update_reloc_root()
940 ret = btrfs_update_root(trans, fs_info->tree_root, in btrfs_update_reloc_root()
941 &reloc_root->root_key, root_item); in btrfs_update_reloc_root()
950 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid) in find_next_inode() argument
952 struct rb_node *node; in find_next_inode() local
957 spin_lock(&root->inode_lock); in find_next_inode()
959 node = root->inode_tree.rb_node; in find_next_inode()
961 while (node) { in find_next_inode()
962 prev = node; in find_next_inode()
963 entry = rb_entry(node, struct btrfs_inode, rb_node); in find_next_inode()
966 node = node->rb_left; in find_next_inode()
968 node = node->rb_right; in find_next_inode()
972 if (!node) { in find_next_inode()
976 node = prev; in find_next_inode()
982 while (node) { in find_next_inode()
983 entry = rb_entry(node, struct btrfs_inode, rb_node); in find_next_inode()
984 inode = igrab(&entry->vfs_inode); in find_next_inode()
986 spin_unlock(&root->inode_lock); in find_next_inode()
991 if (cond_resched_lock(&root->inode_lock)) in find_next_inode()
994 node = rb_next(node); in find_next_inode()
996 spin_unlock(&root->inode_lock); in find_next_inode()
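
find_next_inode() above walks the per-root rb-tree of in-memory inodes to locate the first inode whose number is >= objectid, stepping to the in-order successor when the search falls off the tree. The same "first key greater than or equal to the target" lookup on a plain binary search tree, as a standalone sketch; no igrab(), locking or rb_next() is modelled and the names are invented.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct itree_node {
	uint64_t ino;
	struct itree_node *left, *right;
};

static struct itree_node *first_ge(struct itree_node *root, uint64_t objectid)
{
	struct itree_node *best = NULL;

	while (root) {
		if (objectid <= root->ino) {
			best = root;            /* candidate; try to find a smaller one */
			if (objectid == root->ino)
				break;
			root = root->left;
		} else {
			root = root->right;
		}
	}
	return best;
}

int main(void)
{
	struct itree_node n5 = { 5 }, n9 = { 9 }, n12 = { 12 };
	struct itree_node n7 = { 7, &n5, &n9 };
	struct itree_node root = { 10, &n7, &n12 };
	struct itree_node *hit = first_ge(&root, 8);

	printf("%llu\n", hit ? (unsigned long long)hit->ino : 0ULL);  /* 9 */
	return 0;
}
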
1006 struct btrfs_root *root = BTRFS_I(reloc_inode)->root; in get_new_location() local
1014 return -ENOMEM; in get_new_location()
1016 bytenr -= BTRFS_I(reloc_inode)->index_cnt; in get_new_location()
1017 ret = btrfs_lookup_file_extent(NULL, root, path, in get_new_location()
1022 ret = -ENOENT; in get_new_location()
1026 leaf = path->nodes[0]; in get_new_location()
1027 fi = btrfs_item_ptr(leaf, path->slots[0], in get_new_location()
1036 ret = -EINVAL; in get_new_location()
1054 struct btrfs_root *root, in replace_file_extents() argument
1057 struct btrfs_fs_info *fs_info = root->fs_info; in replace_file_extents()
1072 if (rc->stage != UPDATE_DATA_PTRS) in replace_file_extents()
1076 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) in replace_file_extents()
1077 parent = leaf->start; in replace_file_extents()
1097 if (!in_range(bytenr, rc->block_group->start, in replace_file_extents()
1098 rc->block_group->length)) in replace_file_extents()
1105 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { in replace_file_extents()
1107 inode = find_next_inode(root, key.objectid); in replace_file_extents()
1111 inode = find_next_inode(root, key.objectid); in replace_file_extents()
1117 fs_info->sectorsize)); in replace_file_extents()
1118 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); in replace_file_extents()
1119 end--; in replace_file_extents()
1120 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, in replace_file_extents()
1127 unlock_extent(&BTRFS_I(inode)->io_tree, in replace_file_extents()
1132 ret = get_new_location(rc->data_inode, &new_bytenr, in replace_file_extents()
1145 key.offset -= btrfs_file_extent_offset(leaf, fi); in replace_file_extents()
1148 ref.real_root = root->root_key.objectid; in replace_file_extents()
1159 ref.real_root = root->root_key.objectid; in replace_file_extents()
1182 btrfs_node_key(path->nodes[level], &key2, path->slots[level]); in memcmp_node_keys()
1201 struct btrfs_fs_info *fs_info = dest->fs_info; in replace_path()
1217 ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); in replace_path()
1218 ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); in replace_path()
1220 last_snapshot = btrfs_root_last_snapshot(&src->root_item); in replace_path()
1222 slot = path->slots[lowest_level]; in replace_path()
1223 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); in replace_path()
1245 next_key->objectid = (u64)-1; in replace_path()
1246 next_key->type = (u8)-1; in replace_path()
1247 next_key->offset = (u64)-1; in replace_path()
1259 slot--; in replace_path()
1265 blocksize = fs_info->nodesize; in replace_path()
1269 eb = path->nodes[level]; in replace_path()
1271 path->slots[level]); in replace_path()
1273 path->slots[level]); in replace_path()
1322 btrfs_node_key_to_cpu(path->nodes[level], &key, in replace_path()
1323 path->slots[level]); in replace_path()
1326 path->lowest_level = level; in replace_path()
1328 path->lowest_level = 0; in replace_path()
1331 ret = -ENOENT; in replace_path()
1347 * CoW on the subtree root node before transaction commit. in replace_path()
1350 rc->block_group, parent, slot, in replace_path()
1351 path->nodes[level], path->slots[level], in replace_path()
1362 btrfs_set_node_blockptr(path->nodes[level], in replace_path()
1363 path->slots[level], old_bytenr); in replace_path()
1364 btrfs_set_node_ptr_generation(path->nodes[level], in replace_path()
1365 path->slots[level], old_ptr_gen); in replace_path()
1366 btrfs_mark_buffer_dirty(path->nodes[level]); in replace_path()
1369 blocksize, path->nodes[level]->start); in replace_path()
1371 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid); in replace_path()
1380 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid); in replace_path()
1388 blocksize, path->nodes[level]->start); in replace_path()
1389 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid); in replace_path()
1399 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid); in replace_path()
1421 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, in walk_up_reloc_tree() argument
1429 last_snapshot = btrfs_root_last_snapshot(&root->root_item); in walk_up_reloc_tree()
1432 free_extent_buffer(path->nodes[i]); in walk_up_reloc_tree()
1433 path->nodes[i] = NULL; in walk_up_reloc_tree()
1436 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { in walk_up_reloc_tree()
1437 eb = path->nodes[i]; in walk_up_reloc_tree()
1439 while (path->slots[i] + 1 < nritems) { in walk_up_reloc_tree()
1440 path->slots[i]++; in walk_up_reloc_tree()
1441 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= in walk_up_reloc_tree()
1448 free_extent_buffer(path->nodes[i]); in walk_up_reloc_tree()
1449 path->nodes[i] = NULL; in walk_up_reloc_tree()
1458 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, in walk_down_reloc_tree() argument
1467 last_snapshot = btrfs_root_last_snapshot(&root->root_item); in walk_down_reloc_tree()
1469 for (i = *level; i > 0; i--) { in walk_down_reloc_tree()
1470 eb = path->nodes[i]; in walk_down_reloc_tree()
1472 while (path->slots[i] < nritems) { in walk_down_reloc_tree()
1473 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); in walk_down_reloc_tree()
1476 path->slots[i]++; in walk_down_reloc_tree()
1478 if (path->slots[i] >= nritems) { in walk_down_reloc_tree()
1489 eb = btrfs_read_node_slot(eb, path->slots[i]); in walk_down_reloc_tree()
1492 BUG_ON(btrfs_header_level(eb) != i - 1); in walk_down_reloc_tree()
1493 path->nodes[i - 1] = eb; in walk_down_reloc_tree()
1494 path->slots[i - 1] = 0; in walk_down_reloc_tree()
1503 static int invalidate_extent_cache(struct btrfs_root *root, in invalidate_extent_cache() argument
1507 struct btrfs_fs_info *fs_info = root->fs_info; in invalidate_extent_cache()
1513 objectid = min_key->objectid; in invalidate_extent_cache()
1518 if (objectid > max_key->objectid) in invalidate_extent_cache()
1521 inode = find_next_inode(root, objectid); in invalidate_extent_cache()
1526 if (ino > max_key->objectid) { in invalidate_extent_cache()
1532 if (!S_ISREG(inode->i_mode)) in invalidate_extent_cache()
1535 if (unlikely(min_key->objectid == ino)) { in invalidate_extent_cache()
1536 if (min_key->type > BTRFS_EXTENT_DATA_KEY) in invalidate_extent_cache()
1538 if (min_key->type < BTRFS_EXTENT_DATA_KEY) in invalidate_extent_cache()
1541 start = min_key->offset; in invalidate_extent_cache()
1542 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); in invalidate_extent_cache()
1548 if (unlikely(max_key->objectid == ino)) { in invalidate_extent_cache()
1549 if (max_key->type < BTRFS_EXTENT_DATA_KEY) in invalidate_extent_cache()
1551 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { in invalidate_extent_cache()
1552 end = (u64)-1; in invalidate_extent_cache()
1554 if (max_key->offset == 0) in invalidate_extent_cache()
1556 end = max_key->offset; in invalidate_extent_cache()
1557 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); in invalidate_extent_cache()
1558 end--; in invalidate_extent_cache()
1561 end = (u64)-1; in invalidate_extent_cache()
1565 lock_extent(&BTRFS_I(inode)->io_tree, start, end); in invalidate_extent_cache()
1567 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); in invalidate_extent_cache()
1577 if (!path->nodes[level]) in find_next_key()
1579 if (path->slots[level] + 1 < in find_next_key()
1580 btrfs_header_nritems(path->nodes[level])) { in find_next_key()
1581 btrfs_node_key_to_cpu(path->nodes[level], key, in find_next_key()
1582 path->slots[level] + 1); in find_next_key()
1595 struct btrfs_root *root) in insert_dirty_subvol() argument
1597 struct btrfs_root *reloc_root = root->reloc_root; in insert_dirty_subvol()
1601 /* @root must be a subvolume tree root with a valid reloc tree */ in insert_dirty_subvol()
1602 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); in insert_dirty_subvol()
1605 reloc_root_item = &reloc_root->root_item; in insert_dirty_subvol()
1606 memset(&reloc_root_item->drop_progress, 0, in insert_dirty_subvol()
1607 sizeof(reloc_root_item->drop_progress)); in insert_dirty_subvol()
1610 ret = btrfs_update_reloc_root(trans, root); in insert_dirty_subvol()
1614 if (list_empty(&root->reloc_dirty_list)) { in insert_dirty_subvol()
1615 btrfs_grab_root(root); in insert_dirty_subvol()
1616 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots); in insert_dirty_subvol()
1624 struct btrfs_root *root; in clean_dirty_subvols() local
1629 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots, in clean_dirty_subvols()
1631 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { in clean_dirty_subvols()
1632 /* Merged subvolume, cleanup its reloc root */ in clean_dirty_subvols()
1633 struct btrfs_root *reloc_root = root->reloc_root; in clean_dirty_subvols()
1635 list_del_init(&root->reloc_dirty_list); in clean_dirty_subvols()
1636 root->reloc_root = NULL; in clean_dirty_subvols()
1639 * root->reloc_root = NULL. Pairs with have_reloc_root. in clean_dirty_subvols()
1642 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state); in clean_dirty_subvols()
1646 * ->reloc_root. If it fails however we must in clean_dirty_subvols()
1656 btrfs_put_root(root); in clean_dirty_subvols()
1659 ret2 = btrfs_drop_snapshot(root, 0, 1); in clean_dirty_subvols()
1661 btrfs_put_root(root); in clean_dirty_subvols()
1675 struct btrfs_root *root) in merge_reloc_root() argument
1677 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in merge_reloc_root()
1694 return -ENOMEM; in merge_reloc_root()
1695 path->reada = READA_FORWARD; in merge_reloc_root()
1697 reloc_root = root->reloc_root; in merge_reloc_root()
1698 root_item = &reloc_root->root_item; in merge_reloc_root()
1700 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { in merge_reloc_root()
1702 atomic_inc(&reloc_root->node->refs); in merge_reloc_root()
1703 path->nodes[level] = reloc_root->node; in merge_reloc_root()
1704 path->slots[level] = 0; in merge_reloc_root()
1706 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); in merge_reloc_root()
1710 path->lowest_level = level; in merge_reloc_root()
1712 path->lowest_level = 0; in merge_reloc_root()
1718 btrfs_node_key_to_cpu(path->nodes[level], &next_key, in merge_reloc_root()
1719 path->slots[level]); in merge_reloc_root()
1728 * block COW, we COW at most from level 1 to root level for each tree. in merge_reloc_root()
1734 min_reserved = fs_info->nodesize * reserve_level * 2; in merge_reloc_root()
1738 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, in merge_reloc_root()
1742 trans = btrfs_start_transaction(root, 0); in merge_reloc_root()
1756 * btrfs_update_reloc_root() and update our root item in merge_reloc_root()
1759 reloc_root->last_trans = trans->transid; in merge_reloc_root()
1760 trans->block_rsv = rc->block_rsv; in merge_reloc_root()
1775 ret = replace_path(trans, rc, root, reloc_root, path, in merge_reloc_root()
1782 btrfs_node_key_to_cpu(path->nodes[level], &key, in merge_reloc_root()
1783 path->slots[level]); in merge_reloc_root()
1794 * this is OK since root refs == 1 in this case. in merge_reloc_root()
1796 btrfs_node_key(path->nodes[level], &root_item->drop_progress, in merge_reloc_root()
1797 path->slots[level]); in merge_reloc_root()
1805 if (replaced && rc->stage == UPDATE_DATA_PTRS) in merge_reloc_root()
1806 invalidate_extent_cache(root, &key, &next_key); in merge_reloc_root()
1811 * relocated and the block is tree root. in merge_reloc_root()
1813 leaf = btrfs_lock_root_node(root); in merge_reloc_root()
1814 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf, in merge_reloc_root()
1822 ret = insert_dirty_subvol(trans, rc, root); in merge_reloc_root()
1832 if (replaced && rc->stage == UPDATE_DATA_PTRS) in merge_reloc_root()
1833 invalidate_extent_cache(root, &key, &next_key); in merge_reloc_root()
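
The reservation sized at line 1734 above follows the comment at line 1728: each merge_reloc_root() pass may need to CoW one block per level, from level 1 up to the root, in both the subvolume tree and the reloc tree, hence min_reserved = nodesize * reserve_level * 2. A tiny worked example of that formula; the 16 KiB nodesize and the level-5 root are assumed figures chosen only for the illustration.

#include <stdio.h>

int main(void)
{
	unsigned long nodesize = 16384;         /* assumed 16 KiB tree blocks */
	int root_level = 5;                     /* assumed reloc tree height */
	int reserve_level = root_level > 1 ? root_level : 1;
	unsigned long min_reserved = nodesize * reserve_level * 2;

	/* 16384 * 5 * 2 = 163840 bytes, i.e. 160 KiB reserved per pass */
	printf("min_reserved = %lu bytes\n", min_reserved);
	return 0;
}
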
1841 struct btrfs_root *root = rc->extent_root; in prepare_to_merge() local
1842 struct btrfs_fs_info *fs_info = root->fs_info; in prepare_to_merge()
1849 mutex_lock(&fs_info->reloc_mutex); in prepare_to_merge()
1850 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; in prepare_to_merge()
1851 rc->merging_rsv_size += rc->nodes_relocated * 2; in prepare_to_merge()
1852 mutex_unlock(&fs_info->reloc_mutex); in prepare_to_merge()
1856 num_bytes = rc->merging_rsv_size; in prepare_to_merge()
1857 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes, in prepare_to_merge()
1863 trans = btrfs_join_transaction(rc->extent_root); in prepare_to_merge()
1866 btrfs_block_rsv_release(fs_info, rc->block_rsv, in prepare_to_merge()
1872 if (num_bytes != rc->merging_rsv_size) { in prepare_to_merge()
1874 btrfs_block_rsv_release(fs_info, rc->block_rsv, in prepare_to_merge()
1880 rc->merge_reloc_tree = 1; in prepare_to_merge()
1882 while (!list_empty(&rc->reloc_roots)) { in prepare_to_merge()
1883 reloc_root = list_entry(rc->reloc_roots.next, in prepare_to_merge()
1885 list_del_init(&reloc_root->root_list); in prepare_to_merge()
1887 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, in prepare_to_merge()
1889 if (IS_ERR(root)) { in prepare_to_merge()
1891 * Even if we have an error we need this reloc root in prepare_to_merge()
1894 list_add(&reloc_root->root_list, &reloc_roots); in prepare_to_merge()
1895 btrfs_abort_transaction(trans, (int)PTR_ERR(root)); in prepare_to_merge()
1897 err = PTR_ERR(root); in prepare_to_merge()
1900 ASSERT(root->reloc_root == reloc_root); in prepare_to_merge()
1907 btrfs_set_root_refs(&reloc_root->root_item, 1); in prepare_to_merge()
1908 ret = btrfs_update_reloc_root(trans, root); in prepare_to_merge()
1911 * Even if we have an error we need this reloc root back on our in prepare_to_merge()
1914 list_add(&reloc_root->root_list, &reloc_roots); in prepare_to_merge()
1915 btrfs_put_root(root); in prepare_to_merge()
1925 list_splice(&reloc_roots, &rc->reloc_roots); in prepare_to_merge()
1946 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in merge_reloc_roots()
1947 struct btrfs_root *root; in merge_reloc_roots() local
1953 root = rc->extent_root; in merge_reloc_roots()
1961 mutex_lock(&fs_info->reloc_mutex); in merge_reloc_roots()
1962 list_splice_init(&rc->reloc_roots, &reloc_roots); in merge_reloc_roots()
1963 mutex_unlock(&fs_info->reloc_mutex); in merge_reloc_roots()
1970 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, in merge_reloc_roots()
1972 if (btrfs_root_refs(&reloc_root->root_item) > 0) { in merge_reloc_roots()
1973 if (IS_ERR(root)) { in merge_reloc_roots()
1976 * and if we didn't find the root then we marked in merge_reloc_roots()
1977 * the reloc root as a garbage root. For normal in merge_reloc_roots()
1978 * relocation obviously the root should exist in in merge_reloc_roots()
1983 ret = PTR_ERR(root); in merge_reloc_roots()
1986 if (root->reloc_root != reloc_root) { in merge_reloc_roots()
1993 ret = -EINVAL; in merge_reloc_roots()
1996 ret = merge_reloc_root(rc, root); in merge_reloc_roots()
1997 btrfs_put_root(root); in merge_reloc_roots()
1999 if (list_empty(&reloc_root->root_list)) in merge_reloc_roots()
2000 list_add_tail(&reloc_root->root_list, in merge_reloc_roots()
2005 if (!IS_ERR(root)) { in merge_reloc_roots()
2006 if (root->reloc_root == reloc_root) { in merge_reloc_roots()
2007 root->reloc_root = NULL; in merge_reloc_roots()
2011 &root->state); in merge_reloc_roots()
2012 btrfs_put_root(root); in merge_reloc_roots()
2015 list_del_init(&reloc_root->root_list); in merge_reloc_roots()
2016 /* Don't forget to queue this reloc root for cleanup */ in merge_reloc_roots()
2017 list_add_tail(&reloc_root->reloc_dirty_list, in merge_reloc_roots()
2018 &rc->dirty_subvol_roots); in merge_reloc_roots()
2031 /* new reloc root may be added */ in merge_reloc_roots()
2032 mutex_lock(&fs_info->reloc_mutex); in merge_reloc_roots()
2033 list_splice_init(&rc->reloc_roots, &reloc_roots); in merge_reloc_roots()
2034 mutex_unlock(&fs_info->reloc_mutex); in merge_reloc_roots()
2041 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); in merge_reloc_roots()
2046 * fine because we're bailing here, and we hold a reference on the root in merge_reloc_roots()
2048 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root in merge_reloc_roots()
2069 struct btrfs_fs_info *fs_info = reloc_root->fs_info; in record_reloc_root_in_trans()
2070 struct btrfs_root *root; in record_reloc_root_in_trans() local
2073 if (reloc_root->last_trans == trans->transid) in record_reloc_root_in_trans()
2076 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); in record_reloc_root_in_trans()
2079 * This should succeed, since we can't have a reloc root without having in record_reloc_root_in_trans()
2080 * already looked up the actual root and created the reloc root for this in record_reloc_root_in_trans()
2081 * root. in record_reloc_root_in_trans()
2084 * reloc root without a corresponding root this could return ENOENT. in record_reloc_root_in_trans()
2086 if (IS_ERR(root)) { in record_reloc_root_in_trans()
2088 return PTR_ERR(root); in record_reloc_root_in_trans()
2090 if (root->reloc_root != reloc_root) { in record_reloc_root_in_trans()
2093 "root %llu has two reloc roots associated with it", in record_reloc_root_in_trans()
2094 reloc_root->root_key.offset); in record_reloc_root_in_trans()
2095 btrfs_put_root(root); in record_reloc_root_in_trans()
2096 return -EUCLEAN; in record_reloc_root_in_trans()
2098 ret = btrfs_record_root_in_trans(trans, root); in record_reloc_root_in_trans()
2099 btrfs_put_root(root); in record_reloc_root_in_trans()
2107 struct btrfs_backref_node *node, in select_reloc_root() argument
2111 struct btrfs_root *root; in select_reloc_root() local
2115 next = node; in select_reloc_root()
2119 root = next->root; in select_reloc_root()
2122 * If there is no root, then our references for this block are in select_reloc_root()
2124 * block that is owned by a root. in select_reloc_root()
2127 * non-SHAREABLE root then we have backrefs that resolve in select_reloc_root()
2133 if (!root) { in select_reloc_root()
2135 btrfs_err(trans->fs_info, in select_reloc_root()
2136 "bytenr %llu doesn't have a backref path ending in a root", in select_reloc_root()
2137 node->bytenr); in select_reloc_root()
2138 return ERR_PTR(-EUCLEAN); in select_reloc_root()
2140 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { in select_reloc_root()
2142 btrfs_err(trans->fs_info, in select_reloc_root()
2143 "bytenr %llu has multiple refs with one ending in a non-shareable root", in select_reloc_root()
2144 node->bytenr); in select_reloc_root()
2145 return ERR_PTR(-EUCLEAN); in select_reloc_root()
2148 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { in select_reloc_root()
2149 ret = record_reloc_root_in_trans(trans, root); in select_reloc_root()
2155 ret = btrfs_record_root_in_trans(trans, root); in select_reloc_root()
2158 root = root->reloc_root; in select_reloc_root()
2162 * root->reloc_root may not be set, return ENOENT in this case. in select_reloc_root()
2164 if (!root) in select_reloc_root()
2165 return ERR_PTR(-ENOENT); in select_reloc_root()
2167 if (next->new_bytenr != root->node->start) { in select_reloc_root()
2169 * We just created the reloc root, so we shouldn't have in select_reloc_root()
2170 * ->new_bytenr set and this shouldn't be in the changed in select_reloc_root()
2175 ASSERT(next->new_bytenr == 0); in select_reloc_root()
2176 ASSERT(list_empty(&next->list)); in select_reloc_root()
2177 if (next->new_bytenr || !list_empty(&next->list)) { in select_reloc_root()
2178 btrfs_err(trans->fs_info, in select_reloc_root()
2180 node->bytenr, next->bytenr); in select_reloc_root()
2181 return ERR_PTR(-EUCLEAN); in select_reloc_root()
2184 next->new_bytenr = root->node->start; in select_reloc_root()
2185 btrfs_put_root(next->root); in select_reloc_root()
2186 next->root = btrfs_grab_root(root); in select_reloc_root()
2187 ASSERT(next->root); in select_reloc_root()
2188 list_add_tail(&next->list, in select_reloc_root()
2189 &rc->backref_cache.changed); in select_reloc_root()
2195 root = NULL; in select_reloc_root()
2197 if (!next || next->level <= node->level) in select_reloc_root()
2200 if (!root) { in select_reloc_root()
2206 return ERR_PTR(-ENOENT); in select_reloc_root()
2209 next = node; in select_reloc_root()
2210 /* setup backref node path for btrfs_reloc_cow_block */ in select_reloc_root()
2212 rc->backref_cache.path[next->level] = next; in select_reloc_root()
2213 if (--index < 0) in select_reloc_root()
2215 next = edges[index]->node[UPPER]; in select_reloc_root()
2217 return root; in select_reloc_root()
2221 * Select a tree root for relocation.
2226 * Return a tree root pointer if the block is shareable.
2227 * Return -ENOENT if the block is root of reloc tree.
2230 struct btrfs_root *select_one_root(struct btrfs_backref_node *node) in select_one_root() argument
2233 struct btrfs_root *root; in select_one_root() local
2235 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in select_one_root()
2238 next = node; in select_one_root()
2242 root = next->root; in select_one_root()
2246 * the way up a particular path, in this case return -EUCLEAN. in select_one_root()
2248 if (!root) in select_one_root()
2249 return ERR_PTR(-EUCLEAN); in select_one_root()
2251 /* No other choice for non-shareable tree */ in select_one_root()
2252 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in select_one_root()
2253 return root; in select_one_root()
2255 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) in select_one_root()
2256 fs_root = root; in select_one_root()
2258 if (next != node) in select_one_root()
2262 if (!next || next->level <= node->level) in select_one_root()
2267 return ERR_PTR(-ENOENT); in select_one_root()
2273 struct btrfs_backref_node *node, int reserve) in calcu_metadata_size() argument
2275 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in calcu_metadata_size()
2276 struct btrfs_backref_node *next = node; in calcu_metadata_size()
2278 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in calcu_metadata_size()
2282 BUG_ON(reserve && node->processed); in calcu_metadata_size()
2287 if (next->processed && (reserve || next != node)) in calcu_metadata_size()
2290 num_bytes += fs_info->nodesize; in calcu_metadata_size()
2292 if (list_empty(&next->upper)) in calcu_metadata_size()
2295 edge = list_entry(next->upper.next, in calcu_metadata_size()
2298 next = edge->node[UPPER]; in calcu_metadata_size()
2307 struct btrfs_backref_node *node) in reserve_metadata_space() argument
2309 struct btrfs_root *root = rc->extent_root; in reserve_metadata_space() local
2310 struct btrfs_fs_info *fs_info = root->fs_info; in reserve_metadata_space()
2315 num_bytes = calcu_metadata_size(rc, node, 1) * 2; in reserve_metadata_space()
2317 trans->block_rsv = rc->block_rsv; in reserve_metadata_space()
2318 rc->reserved_bytes += num_bytes; in reserve_metadata_space()
2322 * If we get an enospc just kick back -EAGAIN so we know to drop the in reserve_metadata_space()
2325 ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes, in reserve_metadata_space()
2328 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; in reserve_metadata_space()
2329 while (tmp <= rc->reserved_bytes) in reserve_metadata_space()
2338 rc->block_rsv->size = tmp + fs_info->nodesize * in reserve_metadata_space()
2340 return -EAGAIN; in reserve_metadata_space()
2355 struct btrfs_backref_node *node, in do_relocation() argument
2361 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in do_relocation()
2362 struct btrfs_root *root; in do_relocation() local
2373 ASSERT(!lowest || !node->eb); in do_relocation()
2375 path->lowest_level = node->level + 1; in do_relocation()
2376 rc->backref_cache.path[node->level] = node; in do_relocation()
2377 list_for_each_entry(edge, &node->upper, list[LOWER]) { in do_relocation()
2382 upper = edge->node[UPPER]; in do_relocation()
2383 root = select_reloc_root(trans, rc, upper, edges); in do_relocation()
2384 if (IS_ERR(root)) { in do_relocation()
2385 ret = PTR_ERR(root); in do_relocation()
2389 if (upper->eb && !upper->locked) { in do_relocation()
2391 ret = btrfs_bin_search(upper->eb, key, &slot); in do_relocation()
2395 bytenr = btrfs_node_blockptr(upper->eb, slot); in do_relocation()
2396 if (node->eb->start == bytenr) in do_relocation()
2402 if (!upper->eb) { in do_relocation()
2403 ret = btrfs_search_slot(trans, root, key, path, 0, 1); in do_relocation()
2406 ret = -ENOENT; in do_relocation()
2412 if (!upper->eb) { in do_relocation()
2413 upper->eb = path->nodes[upper->level]; in do_relocation()
2414 path->nodes[upper->level] = NULL; in do_relocation()
2416 BUG_ON(upper->eb != path->nodes[upper->level]); in do_relocation()
2419 upper->locked = 1; in do_relocation()
2420 path->locks[upper->level] = 0; in do_relocation()
2422 slot = path->slots[upper->level]; in do_relocation()
2425 ret = btrfs_bin_search(upper->eb, key, &slot); in do_relocation()
2431 bytenr = btrfs_node_blockptr(upper->eb, slot); in do_relocation()
2433 if (bytenr != node->bytenr) { in do_relocation()
2434 btrfs_err(root->fs_info, in do_relocation()
2435 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", in do_relocation()
2436 bytenr, node->bytenr, slot, in do_relocation()
2437 upper->eb->start); in do_relocation()
2438 ret = -EIO; in do_relocation()
2442 if (node->eb->start == bytenr) in do_relocation()
2446 blocksize = root->fs_info->nodesize; in do_relocation()
2447 eb = btrfs_read_node_slot(upper->eb, slot); in do_relocation()
2454 if (!node->eb) { in do_relocation()
2455 ret = btrfs_cow_block(trans, root, eb, upper->eb, in do_relocation()
2463 * the correct backref node entry. in do_relocation()
2465 ASSERT(node->eb == eb); in do_relocation()
2467 btrfs_set_node_blockptr(upper->eb, slot, in do_relocation()
2468 node->eb->start); in do_relocation()
2469 btrfs_set_node_ptr_generation(upper->eb, slot, in do_relocation()
2470 trans->transid); in do_relocation()
2471 btrfs_mark_buffer_dirty(upper->eb); in do_relocation()
2474 node->eb->start, blocksize, in do_relocation()
2475 upper->eb->start); in do_relocation()
2476 ref.real_root = root->root_key.objectid; in do_relocation()
2477 btrfs_init_tree_ref(&ref, node->level, in do_relocation()
2478 btrfs_header_owner(upper->eb)); in do_relocation()
2481 ret = btrfs_drop_subtree(trans, root, eb, in do_relocation()
2482 upper->eb); in do_relocation()
2487 if (!upper->pending) in do_relocation()
2495 if (!ret && node->pending) { in do_relocation()
2496 btrfs_backref_drop_node_buffer(node); in do_relocation()
2497 list_move_tail(&node->list, &rc->backref_cache.changed); in do_relocation()
2498 node->pending = 0; in do_relocation()
2501 path->lowest_level = 0; in do_relocation()
2507 ASSERT(ret != -ENOSPC); in do_relocation()
2513 struct btrfs_backref_node *node, in link_to_upper() argument
2518 btrfs_node_key_to_cpu(node->eb, &key, 0); in link_to_upper()
2519 return do_relocation(trans, rc, node, &key, path, 0); in link_to_upper()
2527 struct btrfs_backref_cache *cache = &rc->backref_cache; in finish_pending_nodes()
2528 struct btrfs_backref_node *node; in finish_pending_nodes() local
2533 while (!list_empty(&cache->pending[level])) { in finish_pending_nodes()
2534 node = list_entry(cache->pending[level].next, in finish_pending_nodes()
2536 list_move_tail(&node->list, &list); in finish_pending_nodes()
2537 BUG_ON(!node->pending); in finish_pending_nodes()
2540 ret = link_to_upper(trans, rc, node, path); in finish_pending_nodes()
2545 list_splice_init(&list, &cache->pending[level]); in finish_pending_nodes()
2555 struct btrfs_backref_node *node) in update_processed_blocks() argument
2557 struct btrfs_backref_node *next = node; in update_processed_blocks()
2559 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; in update_processed_blocks()
2565 if (next->processed) in update_processed_blocks()
2570 if (list_empty(&next->upper)) in update_processed_blocks()
2573 edge = list_entry(next->upper.next, in update_processed_blocks()
2576 next = edge->node[UPPER]; in update_processed_blocks()
2584 u32 blocksize = rc->extent_root->fs_info->nodesize; in tree_block_processed()
2586 if (test_range_bit(&rc->processed_blocks, bytenr, in tree_block_processed()
2587 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) in tree_block_processed()
2597 eb = read_tree_block(fs_info, block->bytenr, block->owner, in get_tree_block_key()
2598 block->key.offset, block->level, NULL); in get_tree_block_key()
2603 return -EIO; in get_tree_block_key()
2605 if (block->level == 0) in get_tree_block_key()
2606 btrfs_item_key_to_cpu(eb, &block->key, 0); in get_tree_block_key()
2608 btrfs_node_key_to_cpu(eb, &block->key, 0); in get_tree_block_key()
2610 block->key_ready = 1; in get_tree_block_key()
2619 struct btrfs_backref_node *node, in relocate_tree_block() argument
2623 struct btrfs_root *root; in relocate_tree_block() local
2626 if (!node) in relocate_tree_block()
2633 ret = reserve_metadata_space(trans, rc, node); in relocate_tree_block()
2637 BUG_ON(node->processed); in relocate_tree_block()
2638 root = select_one_root(node); in relocate_tree_block()
2639 if (IS_ERR(root)) { in relocate_tree_block()
2640 ret = PTR_ERR(root); in relocate_tree_block()
2642 /* See explanation in select_one_root for the -EUCLEAN case. */ in relocate_tree_block()
2643 ASSERT(ret == -ENOENT); in relocate_tree_block()
2644 if (ret == -ENOENT) { in relocate_tree_block()
2646 update_processed_blocks(rc, node); in relocate_tree_block()
2651 if (root) { in relocate_tree_block()
2652 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { in relocate_tree_block()
2654 * This block was the root block of a root, and this is in relocate_tree_block()
2656 * should not have had the ->new_bytenr modified and in relocate_tree_block()
2666 ASSERT(node->new_bytenr == 0); in relocate_tree_block()
2667 ASSERT(list_empty(&node->list)); in relocate_tree_block()
2668 if (node->new_bytenr || !list_empty(&node->list)) { in relocate_tree_block()
2669 btrfs_err(root->fs_info, in relocate_tree_block()
2671 node->bytenr); in relocate_tree_block()
2672 ret = -EUCLEAN; in relocate_tree_block()
2675 ret = btrfs_record_root_in_trans(trans, root); in relocate_tree_block()
2682 if (!root->reloc_root) { in relocate_tree_block()
2683 ret = -ENOENT; in relocate_tree_block()
2686 root = root->reloc_root; in relocate_tree_block()
2687 node->new_bytenr = root->node->start; in relocate_tree_block()
2688 btrfs_put_root(node->root); in relocate_tree_block()
2689 node->root = btrfs_grab_root(root); in relocate_tree_block()
2690 ASSERT(node->root); in relocate_tree_block()
2691 list_add_tail(&node->list, &rc->backref_cache.changed); in relocate_tree_block()
2693 path->lowest_level = node->level; in relocate_tree_block()
2694 ret = btrfs_search_slot(trans, root, key, path, 0, 1); in relocate_tree_block()
2700 update_processed_blocks(rc, node); in relocate_tree_block()
2702 ret = do_relocation(trans, rc, node, key, path, 1); in relocate_tree_block()
2705 if (ret || node->level == 0 || node->cowonly) in relocate_tree_block()
2706 btrfs_backref_cleanup_node(&rc->backref_cache, node); in relocate_tree_block()
2717 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in relocate_tree_blocks()
2718 struct btrfs_backref_node *node; in relocate_tree_blocks() local
2727 err = -ENOMEM; in relocate_tree_blocks()
2733 if (!block->key_ready) in relocate_tree_blocks()
2734 btrfs_readahead_tree_block(fs_info, block->bytenr, in relocate_tree_blocks()
2735 block->owner, 0, in relocate_tree_blocks()
2736 block->level); in relocate_tree_blocks()
2741 if (!block->key_ready) { in relocate_tree_blocks()
2750 node = build_backref_tree(rc, &block->key, in relocate_tree_blocks()
2751 block->level, block->bytenr); in relocate_tree_blocks()
2752 if (IS_ERR(node)) { in relocate_tree_blocks()
2753 err = PTR_ERR(node); in relocate_tree_blocks()
2757 ret = relocate_tree_block(trans, rc, node, &block->key, in relocate_tree_blocks()
2781 u64 offset = inode->index_cnt; in prealloc_file_extent_cluster()
2785 u64 i_size = i_size_read(&inode->vfs_inode); in prealloc_file_extent_cluster()
2786 u64 prealloc_start = cluster->start - offset; in prealloc_file_extent_cluster()
2787 u64 prealloc_end = cluster->end - offset; in prealloc_file_extent_cluster()
2802 struct address_space *mapping = inode->vfs_inode.i_mapping; in prealloc_file_extent_cluster()
2803 struct btrfs_fs_info *fs_info = inode->root->fs_info; in prealloc_file_extent_cluster()
2804 const u32 sectorsize = fs_info->sectorsize; in prealloc_file_extent_cluster()
2816 * |- btrfs_lock_and_flush_ordered_range() in prealloc_file_extent_cluster()
2817 * |- btrfs_start_ordered_extent() in prealloc_file_extent_cluster()
2818 * |- extent_write_cache_pages() in prealloc_file_extent_cluster()
2819 * |- lock_page() in prealloc_file_extent_cluster()
2833 clear_extent_bits(&inode->io_tree, i_size, in prealloc_file_extent_cluster()
2834 round_up(i_size, PAGE_SIZE) - 1, in prealloc_file_extent_cluster()
2839 * will re-read the whole page anyway. in prealloc_file_extent_cluster()
2843 round_up(i_size, PAGE_SIZE) - i_size); in prealloc_file_extent_cluster()
2849 BUG_ON(cluster->start != cluster->boundary[0]); in prealloc_file_extent_cluster()
2851 prealloc_end + 1 - prealloc_start); in prealloc_file_extent_cluster()
2859 if (btrfs_is_zoned(inode->root->fs_info)) { in prealloc_file_extent_cluster()
2860 struct btrfs_root *root = inode->root; in prealloc_file_extent_cluster() local
2863 end = cluster->end - offset + 1; in prealloc_file_extent_cluster()
2864 trans = btrfs_start_transaction(root, 1); in prealloc_file_extent_cluster()
2868 inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode); in prealloc_file_extent_cluster()
2869 i_size_write(&inode->vfs_inode, end); in prealloc_file_extent_cluster()
2870 ret = btrfs_update_inode(trans, root, inode); in prealloc_file_extent_cluster()
2880 btrfs_inode_lock(&inode->vfs_inode, 0); in prealloc_file_extent_cluster()
2881 for (nr = 0; nr < cluster->nr; nr++) { in prealloc_file_extent_cluster()
2882 start = cluster->boundary[nr] - offset; in prealloc_file_extent_cluster()
2883 if (nr + 1 < cluster->nr) in prealloc_file_extent_cluster()
2884 end = cluster->boundary[nr + 1] - 1 - offset; in prealloc_file_extent_cluster()
2886 end = cluster->end - offset; in prealloc_file_extent_cluster()
2888 lock_extent(&inode->io_tree, start, end); in prealloc_file_extent_cluster()
2889 num_bytes = end + 1 - start; in prealloc_file_extent_cluster()
2890 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, in prealloc_file_extent_cluster()
2894 unlock_extent(&inode->io_tree, start, end); in prealloc_file_extent_cluster()
2898 btrfs_inode_unlock(&inode->vfs_inode, 0); in prealloc_file_extent_cluster()
2901 btrfs_free_reserved_data_space_noquota(inode->root->fs_info, in prealloc_file_extent_cluster()
2902 prealloc_end + 1 - cur_offset); in prealloc_file_extent_cluster()
2910 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in setup_extent_mapping()
2916 return -ENOMEM; in setup_extent_mapping()
2918 em->start = start; in setup_extent_mapping()
2919 em->len = end + 1 - start; in setup_extent_mapping()
2920 em->block_len = em->len; in setup_extent_mapping()
2921 em->block_start = block_start; in setup_extent_mapping()
2922 set_bit(EXTENT_FLAG_PINNED, &em->flags); in setup_extent_mapping()
2924 lock_extent(&BTRFS_I(inode)->io_tree, start, end); in setup_extent_mapping()
2926 write_lock(&em_tree->lock); in setup_extent_mapping()
2928 write_unlock(&em_tree->lock); in setup_extent_mapping()
2929 if (ret != -EEXIST) { in setup_extent_mapping()
2935 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); in setup_extent_mapping()
2944 return atomic_read(&fs_info->balance_cancel_req) || in btrfs_should_cancel_balance()
2945 atomic_read(&fs_info->reloc_cancel_req) || in btrfs_should_cancel_balance()
2954 if (cluster_nr >= cluster->nr - 1) in get_cluster_boundary_end()
2955 return cluster->end; in get_cluster_boundary_end()
2958 return cluster->boundary[cluster_nr + 1] - 1; in get_cluster_boundary_end()
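/*
 * Added example (informal): for a cluster with boundaries B0 < B1 < B2
 * and cluster->end E, extent nr covers
 *
 *	[ cluster->boundary[nr], get_cluster_boundary_end(cluster, nr) ]
 *
 * i.e. [B0, B1 - 1], [B1, B2 - 1] and [B2, E] for the last extent, all in
 * bytenr space (before subtracting the inode offset).
 */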
2965 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in relocate_one_page()
2966 u64 offset = BTRFS_I(inode)->index_cnt; in relocate_one_page()
2967 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT; in relocate_one_page()
2968 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in relocate_one_page()
2976 page = find_lock_page(inode->i_mapping, page_index); in relocate_one_page()
2978 page_cache_sync_readahead(inode->i_mapping, ra, NULL, in relocate_one_page()
2979 page_index, last_index + 1 - page_index); in relocate_one_page()
2980 page = find_or_create_page(inode->i_mapping, page_index, mask); in relocate_one_page()
2982 return -ENOMEM; in relocate_one_page()
2989 page_cache_async_readahead(inode->i_mapping, ra, NULL, page, in relocate_one_page()
2990 page_index, last_index + 1 - page_index); in relocate_one_page()
2996 ret = -EIO; in relocate_one_page()
3002 page_end = page_start + PAGE_SIZE - 1; in relocate_one_page()
3008 cur = max(page_start, cluster->boundary[*cluster_nr] - offset); in relocate_one_page()
3010 u64 extent_start = cluster->boundary[*cluster_nr] - offset; in relocate_one_page()
3012 *cluster_nr) - offset; in relocate_one_page()
3015 u32 clamped_len = clamped_end + 1 - clamped_start; in relocate_one_page()
3024 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end); in relocate_one_page()
3028 clear_extent_bits(&BTRFS_I(inode)->io_tree, in relocate_one_page()
3046 if (in_range(cluster->boundary[*cluster_nr] - offset, in relocate_one_page()
3048 u64 boundary_start = cluster->boundary[*cluster_nr] - in relocate_one_page()
3051 fs_info->sectorsize - 1; in relocate_one_page()
3053 set_extent_bits(&BTRFS_I(inode)->io_tree, in relocate_one_page()
3057 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end); in relocate_one_page()
3065 if (*cluster_nr >= cluster->nr) in relocate_one_page()
3072 balance_dirty_pages_ratelimited(inode->i_mapping); in relocate_one_page()
3075 ret = -ECANCELED; in relocate_one_page()
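/*
 * Added summary (informal): relocate_one_page() brings the page of the
 * data reloc inode uptodate (with readahead), then for each cluster
 * extent overlapping the page it clamps the range to the page, reserves
 * and marks delalloc, and tags the first sector of every extent with
 * EXTENT_BOUNDARY so that writeback preserves the original extent
 * boundaries (the new extents match the old ones one-to-one).  The
 * dirtied pages are moved to their new location by normal writeback
 * later; cancellation requests surface here as -ECANCELED.
 */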
3087 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in relocate_file_extent_cluster()
3088 u64 offset = BTRFS_I(inode)->index_cnt; in relocate_file_extent_cluster()
3095 if (!cluster->nr) in relocate_file_extent_cluster()
3100 return -ENOMEM; in relocate_file_extent_cluster()
3106 file_ra_state_init(ra, inode->i_mapping); in relocate_file_extent_cluster()
3108 ret = setup_extent_mapping(inode, cluster->start - offset, in relocate_file_extent_cluster()
3109 cluster->end - offset, cluster->start); in relocate_file_extent_cluster()
3113 last_index = (cluster->end - offset) >> PAGE_SHIFT; in relocate_file_extent_cluster()
3114 for (index = (cluster->start - offset) >> PAGE_SHIFT; in relocate_file_extent_cluster()
3118 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in relocate_file_extent_cluster()
3120 WARN_ON(cluster_nr != cluster->nr); in relocate_file_extent_cluster()
3132 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { in relocate_data_extent()
3136 cluster->nr = 0; in relocate_data_extent()
3139 if (!cluster->nr) in relocate_data_extent()
3140 cluster->start = extent_key->objectid; in relocate_data_extent()
3142 BUG_ON(cluster->nr >= MAX_EXTENTS); in relocate_data_extent()
3143 cluster->end = extent_key->objectid + extent_key->offset - 1; in relocate_data_extent()
3144 cluster->boundary[cluster->nr] = extent_key->objectid; in relocate_data_extent()
3145 cluster->nr++; in relocate_data_extent()
3147 if (cluster->nr >= MAX_EXTENTS) { in relocate_data_extent()
3151 cluster->nr = 0; in relocate_data_extent()
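/*
 * Added sketch (informal restatement of the logic above): data extents
 * found by the main relocation loop are batched into one contiguous
 * cluster and only flushed when a gap appears or MAX_EXTENTS boundaries
 * have been collected:
 *
 *	if (cluster->nr && extent_key->objectid != cluster->end + 1)
 *		relocate_file_extent_cluster();		// gap: flush old batch
 *	cluster->boundary[cluster->nr++] = extent_key->objectid;
 *	if (cluster->nr >= MAX_EXTENTS)
 *		relocate_file_extent_cluster();		// batch is full
 */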
3171 int level = -1; in add_tree_block()
3175 eb = path->nodes[0]; in add_tree_block()
3176 item_size = btrfs_item_size_nr(eb, path->slots[0]); in add_tree_block()
3178 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || in add_tree_block()
3182 ei = btrfs_item_ptr(eb, path->slots[0], in add_tree_block()
3185 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { in add_tree_block()
3190 level = (int)extent_key->offset; in add_tree_block()
3200 * inline ref offset. We know it's an fs root if in add_tree_block()
3221 return -EINVAL; in add_tree_block()
3226 btrfs_print_v0_err(eb->fs_info); in add_tree_block()
3227 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); in add_tree_block()
3228 return -EINVAL; in add_tree_block()
3235 BUG_ON(level == -1); in add_tree_block()
3239 return -ENOMEM; in add_tree_block()
3241 block->bytenr = extent_key->objectid; in add_tree_block()
3242 block->key.objectid = rc->extent_root->fs_info->nodesize; in add_tree_block()
3243 block->key.offset = generation; in add_tree_block()
3244 block->level = level; in add_tree_block()
3245 block->key_ready = 0; in add_tree_block()
3246 block->owner = owner; in add_tree_block()
3248 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); in add_tree_block()
3250 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, in add_tree_block()
3251 -EEXIST); in add_tree_block()
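/*
 * Added note (informal): add_tree_block() queues one struct tree_block
 * per metadata extent into an rb-tree indexed by bytenr.  The embedded
 * btrfs_key is used as scratch space for now: key.objectid temporarily
 * holds the nodesize and key.offset the extent's generation; key_ready
 * stays 0 until the block's real first key has been read.  A duplicate
 * bytenr indicates a logic error and triggers btrfs_backref_panic().
 */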
3263 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in __add_tree_block()
3277 return -ENOMEM; in __add_tree_block()
3282 key.offset = (u64)-1; in __add_tree_block()
3288 path->search_commit_root = 1; in __add_tree_block()
3289 path->skip_locking = 1; in __add_tree_block()
3290 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); in __add_tree_block()
3295 if (path->slots[0]) { in __add_tree_block()
3296 path->slots[0]--; in __add_tree_block()
3297 btrfs_item_key_to_cpu(path->nodes[0], &key, in __add_tree_block()
3298 path->slots[0]); in __add_tree_block()
3314 btrfs_print_leaf(path->nodes[0]); in __add_tree_block()
3319 ret = -EINVAL; in __add_tree_block()
3334 struct btrfs_root *root = fs_info->tree_root; in delete_block_group_cache() local
3341 inode = btrfs_iget(fs_info->sb, ino, root); in delete_block_group_cache()
3343 return -ENOENT; in delete_block_group_cache()
3347 &fs_info->global_block_rsv); in delete_block_group_cache()
3351 trans = btrfs_join_transaction(root); in delete_block_group_cache()
3367 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the cache inode, so its data extent does not block the relocation.
3402 return -ENOENT; in delete_v1_space_cache()
3403 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL, in delete_v1_space_cache()
3417 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in add_data_references()
3421 const u32 blocksize = fs_info->nodesize; in add_data_references()
3425 ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid, in add_data_references()
3434 eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL); in add_data_references()
3439 ret = delete_v1_space_cache(eb, rc->block_group, in add_data_references()
3440 extent_key->objectid); in add_data_references()
3444 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks); in add_data_references()
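/*
 * Added note (informal): a data extent cannot have its file extent items
 * updated without visiting every tree leaf that references it, so
 * btrfs_find_all_leafs() walks the backrefs and each referencing leaf is
 * read, checked by delete_v1_space_cache() (a v1 free space cache
 * reference is resolved by deleting the cache inode instead), and then
 * queued through __add_tree_block() so the leaf gets COWed and its
 * pointers rewritten.
 */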
3461 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in find_next_extent()
3467 last = rc->block_group->start + rc->block_group->length; in find_next_extent()
3470 if (rc->search_start >= last) { in find_next_extent()
3475 key.objectid = rc->search_start; in find_next_extent()
3479 path->search_commit_root = 1; in find_next_extent()
3480 path->skip_locking = 1; in find_next_extent()
3481 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, in find_next_extent()
3486 leaf = path->nodes[0]; in find_next_extent()
3487 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in find_next_extent()
3488 ret = btrfs_next_leaf(rc->extent_root, path); in find_next_extent()
3491 leaf = path->nodes[0]; in find_next_extent()
3494 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in find_next_extent()
3502 path->slots[0]++; in find_next_extent()
3507 key.objectid + key.offset <= rc->search_start) { in find_next_extent()
3508 path->slots[0]++; in find_next_extent()
3513 key.objectid + fs_info->nodesize <= in find_next_extent()
3514 rc->search_start) { in find_next_extent()
3515 path->slots[0]++; in find_next_extent()
3519 ret = find_first_extent_bit(&rc->processed_blocks, in find_next_extent()
3525 rc->search_start = end + 1; in find_next_extent()
3528 rc->search_start = key.objectid + key.offset; in find_next_extent()
3530 rc->search_start = key.objectid + in find_next_extent()
3531 fs_info->nodesize; in find_next_extent()
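/*
 * Added note (informal): find_next_extent() scans the extent tree from
 * rc->search_start to the end of the block group using the commit root
 * with locking skipped, steps over extents already marked EXTENT_DIRTY
 * in rc->processed_blocks, and leaves the path positioned at the next
 * EXTENT_ITEM/METADATA_ITEM to relocate.
 */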
3542 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in set_reloc_control()
3544 mutex_lock(&fs_info->reloc_mutex); in set_reloc_control()
3545 fs_info->reloc_ctl = rc; in set_reloc_control()
3546 mutex_unlock(&fs_info->reloc_mutex); in set_reloc_control()
3551 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in unset_reloc_control()
3553 mutex_lock(&fs_info->reloc_mutex); in unset_reloc_control()
3554 fs_info->reloc_ctl = NULL; in unset_reloc_control()
3555 mutex_unlock(&fs_info->reloc_mutex); in unset_reloc_control()
3564 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, in prepare_to_relocate()
3566 if (!rc->block_rsv) in prepare_to_relocate()
3567 return -ENOMEM; in prepare_to_relocate()
3569 memset(&rc->cluster, 0, sizeof(rc->cluster)); in prepare_to_relocate()
3570 rc->search_start = rc->block_group->start; in prepare_to_relocate()
3571 rc->extents_found = 0; in prepare_to_relocate()
3572 rc->nodes_relocated = 0; in prepare_to_relocate()
3573 rc->merging_rsv_size = 0; in prepare_to_relocate()
3574 rc->reserved_bytes = 0; in prepare_to_relocate()
3575 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * in prepare_to_relocate()
3577 ret = btrfs_block_rsv_refill(rc->extent_root, in prepare_to_relocate()
3578 rc->block_rsv, rc->block_rsv->size, in prepare_to_relocate()
3583 rc->create_reloc_tree = 1; in prepare_to_relocate()
3586 trans = btrfs_join_transaction(rc->extent_root); in prepare_to_relocate()
3601 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; in relocate_block_group()
3614 return -ENOMEM; in relocate_block_group()
3615 path->reada = READA_FORWARD; in relocate_block_group()
3624 rc->reserved_bytes = 0; in relocate_block_group()
3625 ret = btrfs_block_rsv_refill(rc->extent_root, in relocate_block_group()
3626 rc->block_rsv, rc->block_rsv->size, in relocate_block_group()
3633 trans = btrfs_start_transaction(rc->extent_root, 0); in relocate_block_group()
3640 if (update_backref_cache(trans, &rc->backref_cache)) { in relocate_block_group()
3652 rc->extents_found++; in relocate_block_group()
3654 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], in relocate_block_group()
3656 flags = btrfs_extent_flags(path->nodes[0], ei); in relocate_block_group()
3660 } else if (rc->stage == UPDATE_DATA_PTRS && in relocate_block_group()
3675 if (ret != -EAGAIN) { in relocate_block_group()
3679 rc->extents_found--; in relocate_block_group()
3680 rc->search_start = key.objectid; in relocate_block_group()
3688 if (rc->stage == MOVE_DATA_EXTENTS && in relocate_block_group()
3690 rc->found_file_extent = 1; in relocate_block_group()
3691 ret = relocate_data_extent(rc->data_inode, in relocate_block_group()
3692 &key, &rc->cluster); in relocate_block_group()
3699 err = -ECANCELED; in relocate_block_group()
3703 if (trans && progress && err == -ENOSPC) { in relocate_block_group()
3704 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags); in relocate_block_group()
3713 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); in relocate_block_group()
3721 ret = relocate_file_extent_cluster(rc->data_inode, in relocate_block_group()
3722 &rc->cluster); in relocate_block_group()
3727 rc->create_reloc_tree = 0; in relocate_block_group()
3730 btrfs_backref_release_cache(&rc->backref_cache); in relocate_block_group()
3731 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); in relocate_block_group()
3745 rc->merge_reloc_tree = 0; in relocate_block_group()
3747 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); in relocate_block_group()
3750 trans = btrfs_join_transaction(rc->extent_root); in relocate_block_group()
3762 btrfs_free_block_rsv(fs_info, rc->block_rsv); in relocate_block_group()
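/*
 * Added summary (informal): relocate_block_group() is the core loop.
 * Each pass refills the block reserve, starts a transaction, finds the
 * next unprocessed extent and either queues tree blocks (metadata) or
 * grows a data extent cluster that is flushed through the data reloc
 * inode.  On -ENOSPC it forces allocation of a new chunk of the block
 * group's type and restarts the scan.  Once the scan finishes, the last
 * cluster is flushed, the reloc trees are merged back into the fs trees
 * (merge_reloc_roots() sits in the elided lines) and the backref cache
 * and block reserve are released.
 */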
3768 struct btrfs_root *root, u64 objectid) in __insert_orphan_inode() argument
3776 if (btrfs_is_zoned(trans->fs_info)) in __insert_orphan_inode()
3781 return -ENOMEM; in __insert_orphan_inode()
3783 ret = btrfs_insert_empty_inode(trans, root, path, objectid); in __insert_orphan_inode()
3787 leaf = path->nodes[0]; in __insert_orphan_inode()
3788 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); in __insert_orphan_inode()
3801 struct btrfs_root *root, u64 objectid) in delete_orphan_inode() argument
3809 ret = -ENOMEM; in delete_orphan_inode()
3816 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in delete_orphan_inode()
3819 ret = -ENOENT; in delete_orphan_inode()
3822 ret = btrfs_del_item(trans, root, path); in delete_orphan_inode()
3839 struct btrfs_root *root; in create_reloc_inode() local
3843 root = btrfs_grab_root(fs_info->data_reloc_root); in create_reloc_inode()
3844 trans = btrfs_start_transaction(root, 6); in create_reloc_inode()
3846 btrfs_put_root(root); in create_reloc_inode()
3850 err = btrfs_get_free_objectid(root, &objectid); in create_reloc_inode()
3854 err = __insert_orphan_inode(trans, root, objectid); in create_reloc_inode()
3858 inode = btrfs_iget(fs_info->sb, objectid, root); in create_reloc_inode()
3860 delete_orphan_inode(trans, root, objectid); in create_reloc_inode()
3865 BTRFS_I(inode)->index_cnt = group->start; in create_reloc_inode()
3869 btrfs_put_root(root); in create_reloc_inode()
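/*
 * Added note (informal): the data relocation inode is created as an
 * orphan in the data reloc tree (it is never linked into a directory),
 * and BTRFS_I(inode)->index_cnt is reused to remember block_group->start.
 * That is what makes the simple mapping used by the cluster code work:
 *
 *	file_offset = extent_bytenr - block_group->start
 */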
3886 * -EINPROGRESS operation is already in progress, that's probably a bug
3887 * -ECANCELED cancellation request was set before the operation started
3888 * -EAGAIN can not start because there are ongoing send operations
3892 spin_lock(&fs_info->send_reloc_lock); in reloc_chunk_start()
3893 if (fs_info->send_in_progress) { in reloc_chunk_start()
3896 fs_info->send_in_progress); in reloc_chunk_start()
3897 spin_unlock(&fs_info->send_reloc_lock); in reloc_chunk_start()
3898 return -EAGAIN; in reloc_chunk_start()
3900 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) { in reloc_chunk_start()
3902 spin_unlock(&fs_info->send_reloc_lock); in reloc_chunk_start()
3904 return -EINPROGRESS; in reloc_chunk_start()
3906 spin_unlock(&fs_info->send_reloc_lock); in reloc_chunk_start()
3908 if (atomic_read(&fs_info->reloc_cancel_req) > 0) { in reloc_chunk_start()
3914 atomic_set(&fs_info->reloc_cancel_req, 0); in reloc_chunk_start()
3915 return -ECANCELED; in reloc_chunk_start()
3926 if (atomic_read(&fs_info->reloc_cancel_req) > 0) in reloc_chunk_end()
3928 spin_lock(&fs_info->send_reloc_lock); in reloc_chunk_end()
3929 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); in reloc_chunk_end()
3930 spin_unlock(&fs_info->send_reloc_lock); in reloc_chunk_end()
3931 atomic_set(&fs_info->reloc_cancel_req, 0); in reloc_chunk_end()
3942 INIT_LIST_HEAD(&rc->reloc_roots); in alloc_reloc_control()
3943 INIT_LIST_HEAD(&rc->dirty_subvol_roots); in alloc_reloc_control()
3944 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); in alloc_reloc_control()
3945 mapping_tree_init(&rc->reloc_root_tree); in alloc_reloc_control()
3946 extent_io_tree_init(fs_info, &rc->processed_blocks, in alloc_reloc_control()
3953 struct mapping_node *node, *tmp; in free_reloc_control() local
3955 free_reloc_roots(&rc->reloc_roots); in free_reloc_control()
3956 rbtree_postorder_for_each_entry_safe(node, tmp, in free_reloc_control()
3957 &rc->reloc_root_tree.rb_root, rb_node) in free_reloc_control()
3958 kfree(node); in free_reloc_control()
3971 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); in describe_relocation()
3975 block_group->start, buf); in describe_relocation()
3993 struct btrfs_root *extent_root = fs_info->extent_root; in btrfs_relocate_block_group()
4003 return -ENOENT; in btrfs_relocate_block_group()
4007 return -ETXTBSY; in btrfs_relocate_block_group()
4013 return -ENOMEM; in btrfs_relocate_block_group()
4022 rc->extent_root = extent_root; in btrfs_relocate_block_group()
4023 rc->block_group = bg; in btrfs_relocate_block_group()
4025 ret = btrfs_inc_block_group_ro(rc->block_group, true); in btrfs_relocate_block_group()
4034 err = -ENOMEM; in btrfs_relocate_block_group()
4038 inode = lookup_free_space_inode(rc->block_group, path); in btrfs_relocate_block_group()
4042 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0); in btrfs_relocate_block_group()
4046 if (ret && ret != -ENOENT) { in btrfs_relocate_block_group()
4051 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); in btrfs_relocate_block_group()
4052 if (IS_ERR(rc->data_inode)) { in btrfs_relocate_block_group()
4053 err = PTR_ERR(rc->data_inode); in btrfs_relocate_block_group()
4054 rc->data_inode = NULL; in btrfs_relocate_block_group()
4058 describe_relocation(fs_info, rc->block_group); in btrfs_relocate_block_group()
4060 btrfs_wait_block_group_reservations(rc->block_group); in btrfs_relocate_block_group()
4061 btrfs_wait_nocow_writers(rc->block_group); in btrfs_relocate_block_group()
4063 rc->block_group->start, in btrfs_relocate_block_group()
4064 rc->block_group->length); in btrfs_relocate_block_group()
4069 mutex_lock(&fs_info->cleaner_mutex); in btrfs_relocate_block_group()
4071 mutex_unlock(&fs_info->cleaner_mutex); in btrfs_relocate_block_group()
4075 finishes_stage = rc->stage; in btrfs_relocate_block_group()
4080 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in btrfs_reloc_cow_block() in btrfs_relocate_block_group()
4085 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { in btrfs_relocate_block_group()
4086 ret = btrfs_wait_ordered_range(rc->data_inode, 0, in btrfs_relocate_block_group()
4087 (u64)-1); in btrfs_relocate_block_group()
4090 invalidate_mapping_pages(rc->data_inode->i_mapping, in btrfs_relocate_block_group()
4091 0, -1); in btrfs_relocate_block_group()
4092 rc->stage = UPDATE_DATA_PTRS; in btrfs_relocate_block_group()
4098 if (rc->extents_found == 0) in btrfs_relocate_block_group()
4102 rc->extents_found, stage_to_string(finishes_stage)); in btrfs_relocate_block_group()
4105 WARN_ON(rc->block_group->pinned > 0); in btrfs_relocate_block_group()
4106 WARN_ON(rc->block_group->reserved > 0); in btrfs_relocate_block_group()
4107 WARN_ON(rc->block_group->used > 0); in btrfs_relocate_block_group()
4110 btrfs_dec_block_group_ro(rc->block_group); in btrfs_relocate_block_group()
4111 iput(rc->data_inode); in btrfs_relocate_block_group()
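/*
 * Added summary (informal): a block group is relocated in up to two
 * passes of the loop above.  MOVE_DATA_EXTENTS copies the data into the
 * data reloc inode at its new location; if any file extents were found,
 * ordered extents are waited for, the reloc inode's page cache is
 * invalidated, and a second UPDATE_DATA_PTRS pass rewrites the file
 * extent items (and the metadata referencing them) to point at the new
 * copies.  The loop ends only when a pass finds no extents at all.
 */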
4119 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) in mark_garbage_root() argument
4121 struct btrfs_fs_info *fs_info = root->fs_info; in mark_garbage_root()
4125 trans = btrfs_start_transaction(fs_info->tree_root, 0); in mark_garbage_root()
4129 memset(&root->root_item.drop_progress, 0, in mark_garbage_root()
4130 sizeof(root->root_item.drop_progress)); in mark_garbage_root()
4131 btrfs_set_root_drop_level(&root->root_item, 0); in mark_garbage_root()
4132 btrfs_set_root_refs(&root->root_item, 0); in mark_garbage_root()
4133 ret = btrfs_update_root(trans, fs_info->tree_root, in mark_garbage_root()
4134 &root->root_key, &root->root_item); in mark_garbage_root()
4148 int btrfs_recover_relocation(struct btrfs_root *root) in btrfs_recover_relocation() argument
4150 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_recover_relocation()
4164 return -ENOMEM; in btrfs_recover_relocation()
4165 path->reada = READA_BACK; in btrfs_recover_relocation()
4169 key.offset = (u64)-1; in btrfs_recover_relocation()
4172 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, in btrfs_recover_relocation()
4179 if (path->slots[0] == 0) in btrfs_recover_relocation()
4181 path->slots[0]--; in btrfs_recover_relocation()
4183 leaf = path->nodes[0]; in btrfs_recover_relocation()
4184 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_recover_relocation()
4191 reloc_root = btrfs_read_tree_root(root, &key); in btrfs_recover_relocation()
4197 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); in btrfs_recover_relocation()
4198 list_add(&reloc_root->root_list, &reloc_roots); in btrfs_recover_relocation()
4200 if (btrfs_root_refs(&reloc_root->root_item) > 0) { in btrfs_recover_relocation()
4202 reloc_root->root_key.offset, false); in btrfs_recover_relocation()
4205 if (ret != -ENOENT) { in btrfs_recover_relocation()
4222 key.offset--; in btrfs_recover_relocation()
4231 err = -ENOMEM; in btrfs_recover_relocation()
4241 rc->extent_root = fs_info->extent_root; in btrfs_recover_relocation()
4245 trans = btrfs_join_transaction(rc->extent_root); in btrfs_recover_relocation()
4251 rc->merge_reloc_tree = 1; in btrfs_recover_relocation()
4256 list_del(&reloc_root->root_list); in btrfs_recover_relocation()
4258 if (btrfs_root_refs(&reloc_root->root_item) == 0) { in btrfs_recover_relocation()
4259 list_add_tail(&reloc_root->root_list, in btrfs_recover_relocation()
4260 &rc->reloc_roots); in btrfs_recover_relocation()
4264 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, in btrfs_recover_relocation()
4268 list_add_tail(&reloc_root->root_list, &reloc_roots); in btrfs_recover_relocation()
4274 ASSERT(err != -EEXIST); in btrfs_recover_relocation()
4276 list_add_tail(&reloc_root->root_list, &reloc_roots); in btrfs_recover_relocation()
4281 fs_root->reloc_root = btrfs_grab_root(reloc_root); in btrfs_recover_relocation()
4293 trans = btrfs_join_transaction(rc->extent_root); in btrfs_recover_relocation()
4315 fs_root = btrfs_grab_root(fs_info->data_reloc_root); in btrfs_recover_relocation()
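/*
 * Added note (informal): recovery walks the reloc tree root items in the
 * tree root backwards from offset (u64)-1, reads each reloc root and
 * either hooks it up to its still-existing fs root for a normal merge or
 * marks it as garbage to be dropped.  A fresh reloc_control with
 * merge_reloc_tree = 1 then replays the merge, and finally orphan
 * cleanup is run on the data reloc root grabbed above (the cleanup call
 * itself is elided in this listing).
 */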
4327 * cloning the existing checksums also saves the CPU time of re-calculating them.
4331 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_reloc_clone_csums()
4340 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len); in btrfs_reloc_clone_csums()
4342 disk_bytenr = file_pos + inode->index_cnt; in btrfs_reloc_clone_csums()
4343 ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr, in btrfs_reloc_clone_csums()
4344 disk_bytenr + len - 1, &list, 0); in btrfs_reloc_clone_csums()
4350 list_del_init(&sums->list); in btrfs_reloc_clone_csums()
4364 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr; in btrfs_reloc_clone_csums()
4365 sums->bytenr = new_bytenr; in btrfs_reloc_clone_csums()
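/*
 * Added note (informal): the checksums are looked up at the extent's old
 * disk bytenr (file_pos + index_cnt, i.e. its original location inside
 * the block group being relocated) and then rebased onto the ordered
 * extent's new disk bytenr, so nothing needs to be re-checksummed and
 * nodatasum extents keep behaving correctly.
 */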
4375 struct btrfs_root *root, struct extent_buffer *buf, in btrfs_reloc_cow_block() argument
4378 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_reloc_cow_block()
4380 struct btrfs_backref_node *node; in btrfs_reloc_cow_block() local
4385 rc = fs_info->reloc_ctl; in btrfs_reloc_cow_block()
4389 BUG_ON(rc->stage == UPDATE_DATA_PTRS && in btrfs_reloc_cow_block()
4390 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); in btrfs_reloc_cow_block()
4394 btrfs_root_last_snapshot(&root->root_item)) in btrfs_reloc_cow_block()
4397 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && in btrfs_reloc_cow_block()
4398 rc->create_reloc_tree) { in btrfs_reloc_cow_block()
4401 node = rc->backref_cache.path[level]; in btrfs_reloc_cow_block()
4402 BUG_ON(node->bytenr != buf->start && in btrfs_reloc_cow_block()
4403 node->new_bytenr != buf->start); in btrfs_reloc_cow_block()
4405 btrfs_backref_drop_node_buffer(node); in btrfs_reloc_cow_block()
4406 atomic_inc(&cow->refs); in btrfs_reloc_cow_block()
4407 node->eb = cow; in btrfs_reloc_cow_block()
4408 node->new_bytenr = cow->start; in btrfs_reloc_cow_block()
4410 if (!node->pending) { in btrfs_reloc_cow_block()
4411 list_move_tail(&node->list, in btrfs_reloc_cow_block()
4412 &rc->backref_cache.pending[level]); in btrfs_reloc_cow_block()
4413 node->pending = 1; in btrfs_reloc_cow_block()
4417 mark_block_processed(rc, node); in btrfs_reloc_cow_block()
4420 rc->nodes_relocated += buf->len; in btrfs_reloc_cow_block()
4423 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) in btrfs_reloc_cow_block()
4424 ret = replace_file_extents(trans, rc, root, cow); in btrfs_reloc_cow_block()
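/*
 * Added note (informal): this COW hook keeps the backref cache coherent
 * while relocation runs.  When a cached block is COWed, the node is
 * switched to the new extent buffer (node->eb, node->new_bytenr) and
 * parked on the per-level pending list so the cache can later fold the
 * new bytenr into its rb-tree; leaves COWed during the UPDATE_DATA_PTRS
 * stage additionally get their file extent items rewritten to the new
 * data location via replace_file_extents().
 */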
4435 struct btrfs_root *root = pending->root; in btrfs_reloc_pre_snapshot() local
4436 struct reloc_control *rc = root->fs_info->reloc_ctl; in btrfs_reloc_pre_snapshot()
4438 if (!rc || !have_reloc_root(root)) in btrfs_reloc_pre_snapshot()
4441 if (!rc->merge_reloc_tree) in btrfs_reloc_pre_snapshot()
4444 root = root->reloc_root; in btrfs_reloc_pre_snapshot()
4445 BUG_ON(btrfs_root_refs(&root->root_item) == 0); in btrfs_reloc_pre_snapshot()
4456 *bytes_to_reserve += rc->nodes_relocated; in btrfs_reloc_pre_snapshot()
4461 * and create reloc root for the newly created snapshot; we come out of
4464 * here with two references held on the reloc_root, one for root->reloc_root
4465 * and one for rc->reloc_roots.
4470 struct btrfs_root *root = pending->root; in btrfs_reloc_post_snapshot() local
4473 struct reloc_control *rc = root->fs_info->reloc_ctl; in btrfs_reloc_post_snapshot()
4476 if (!rc || !have_reloc_root(root)) in btrfs_reloc_post_snapshot()
4479 rc = root->fs_info->reloc_ctl; in btrfs_reloc_post_snapshot()
4480 rc->merging_rsv_size += rc->nodes_relocated; in btrfs_reloc_post_snapshot()
4482 if (rc->merge_reloc_tree) { in btrfs_reloc_post_snapshot()
4483 ret = btrfs_block_rsv_migrate(&pending->block_rsv, in btrfs_reloc_post_snapshot()
4484 rc->block_rsv, in btrfs_reloc_post_snapshot()
4485 rc->nodes_relocated, true); in btrfs_reloc_post_snapshot()
4490 new_root = pending->snap; in btrfs_reloc_post_snapshot()
4491 reloc_root = create_reloc_root(trans, root->reloc_root, in btrfs_reloc_post_snapshot()
4492 new_root->root_key.objectid); in btrfs_reloc_post_snapshot()
4497 ASSERT(ret != -EEXIST); in btrfs_reloc_post_snapshot()
4503 new_root->reloc_root = btrfs_grab_root(reloc_root); in btrfs_reloc_post_snapshot()
4505 if (rc->create_reloc_tree) in btrfs_reloc_post_snapshot()
4506 ret = clone_backref_node(trans, rc, root, reloc_root); in btrfs_reloc_post_snapshot()
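/*
 * Added note (informal): when a snapshot is taken in the middle of a
 * relocation, the metadata reservation estimated in
 * btrfs_reloc_pre_snapshot() is migrated into rc->block_rsv, a reloc
 * root is created for the new snapshot so it takes part in the later
 * merge, and, while reloc trees are still being created, the relevant
 * backref nodes are cloned for the snapshot as well.
 */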