Lines Matching +full:edge +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
24 u64 offset; member
35 u64 offset = 0; in check_extent_in_eb() local
51 offset = extent_item_pos - data_offset; in check_extent_in_eb()
56 return -ENOMEM; in check_extent_in_eb()
58 e->next = *eie; in check_extent_in_eb()
59 e->inum = key->objectid; in check_extent_in_eb()
60 e->offset = key->offset + offset; in check_extent_in_eb()
71 eie_next = eie->next; in free_inode_elem_list()
133 * ref->count >0:
134 * - incremented when a ref->count transitions to >0
135 * - decremented when a ref->count transitions to <1
146 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
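The update_share_count() fragments shown further down the listing implement exactly the transitions described in the comment above. Pieced together, the helper looks roughly like this (a sketch reconstructed from those fragments, assuming the share_check type declared earlier in this file):

static void update_share_count(struct share_check *sc, int oldcount,
                               int newcount)
{
        if (!sc || (oldcount == 0 && newcount < 1))
                return;

        if (oldcount > 0 && newcount < 1)
                sc->share_count--;              /* ref count dropped below 1 */
        else if (oldcount < 1 && newcount > 0)
                sc->share_count++;              /* ref count became positive */
}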
159 return -ENOMEM; in btrfs_prelim_ref_init()
175 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
181 if (ref1->level < ref2->level) in prelim_ref_compare()
182 return -1; in prelim_ref_compare()
183 if (ref1->level > ref2->level) in prelim_ref_compare()
185 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
186 return -1; in prelim_ref_compare()
187 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
189 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
190 return -1; in prelim_ref_compare()
191 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
193 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
194 return -1; in prelim_ref_compare()
195 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
197 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
198 return -1; in prelim_ref_compare()
199 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
201 if (ref1->parent < ref2->parent) in prelim_ref_compare()
202 return -1; in prelim_ref_compare()
203 if (ref1->parent > ref2->parent) in prelim_ref_compare()
216 sc->share_count--; in update_share_count()
218 sc->share_count++; in update_share_count()
238 root = &preftree->root; in prelim_ref_insert()
239 p = &root->rb_root.rb_node; in prelim_ref_insert()
246 p = &(*p)->rb_left; in prelim_ref_insert()
248 p = &(*p)->rb_right; in prelim_ref_insert()
252 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
254 while (eie && eie->next) in prelim_ref_insert()
255 eie = eie->next; in prelim_ref_insert()
258 ref->inode_list = newref->inode_list; in prelim_ref_insert()
260 eie->next = newref->inode_list; in prelim_ref_insert()
262 preftree->count); in prelim_ref_insert()
264 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
265 * The ref->count is updated to follow any in prelim_ref_insert()
268 update_share_count(sc, ref->count, in prelim_ref_insert()
269 ref->count + newref->count); in prelim_ref_insert()
270 ref->count += newref->count; in prelim_ref_insert()
276 update_share_count(sc, 0, newref->count); in prelim_ref_insert()
277 preftree->count++; in prelim_ref_insert()
278 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
279 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
280 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
292 &preftree->root.rb_root, rbnode) { in prelim_release()
293 free_inode_elem_list(ref->inode_list); in prelim_release()
297 preftree->root = RB_ROOT_CACHED; in prelim_release()
298 preftree->count = 0; in prelim_release()
303 * - obtaining the parent is the goal
304 * - if you add a key, you must know that it is a correct key
305 * - if you cannot add the parent or a correct key, then we will look into the
312 * --------------------+--------+----------+--------+----------
313 * parent logical | y | - | - | -
314 * key to resolve | - | y | y | y
315 * tree block logical | - | - | - | -
318 * - column 1: we've the parent -> done
319 * - column 2, 3, 4: we use the key to find the parent
325 * --------------------+--------+----------+--------+----------
326 * parent logical | y | - | y | -
327 * key to resolve | - | - | - | y
329 * root for resolving | - | y | y | y
331 * - column 1, 3: we've the parent -> done
332 * - column 2: we take the first key from the block to find the parent
334 * - column 4: we use the key to find the parent
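As an illustration of the table above: a SHARED_BLOCK_REF carries the parent logical directly (column 1), while a TREE_BLOCK_REF only names the owning root, so the key must be resolved later. A hedged sketch of how a caller such as add_inline_refs(), shown further below, dispatches between the two preftrees (type, offset, info_level and bytenr come from the surrounding caller):

switch (type) {
case BTRFS_SHARED_BLOCK_REF_KEY:
        /* Column 1: parent logical is known, add a direct ref. */
        ret = add_direct_ref(fs_info, preftrees, *info_level + 1,
                             offset, bytenr, 1, NULL, GFP_NOFS);
        break;
case BTRFS_TREE_BLOCK_REF_KEY:
        /* Root is known, key resolved later: add an indirect ref. */
        ret = add_indirect_ref(fs_info, preftrees, offset, NULL,
                               *info_level + 1, bytenr, 1, NULL,
                               GFP_NOFS);
        break;
}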
352 return -ENOMEM; in add_prelim_ref()
354 ref->root_id = root_id; in add_prelim_ref()
356 ref->key_for_search = *key; in add_prelim_ref()
358 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
360 ref->inode_list = NULL; in add_prelim_ref()
361 ref->level = level; in add_prelim_ref()
362 ref->count = count; in add_prelim_ref()
363 ref->parent = parent; in add_prelim_ref()
364 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
375 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
386 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
389 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
396 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
410 p = &(*p)->rb_left; in is_shared_data_backref()
412 p = &(*p)->rb_right; in is_shared_data_backref()
429 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
433 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
438 eb = path->nodes[level]; in add_all_parents()
439 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
455 eb = path->nodes[0]; in add_all_parents()
456 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
457 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
458 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
465 while (!ret && count < ref->count) { in add_all_parents()
466 eb = path->nodes[0]; in add_all_parents()
467 slot = path->slots[0]; in add_all_parents()
471 if (key.objectid != key_for_search->objectid || in add_all_parents()
481 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
482 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
496 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
509 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
514 while (old->next) in add_all_parents()
515 old = old->next; in add_all_parents()
516 old->next = eie; in add_all_parents()
548 int level = ref->level; in resolve_indirect_ref()
549 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
559 if (path->search_commit_root) in resolve_indirect_ref()
560 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); in resolve_indirect_ref()
562 root = btrfs_get_fs_root(fs_info, ref->root_id, false); in resolve_indirect_ref()
568 if (!path->search_commit_root && in resolve_indirect_ref()
569 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
570 ret = -ENOENT; in resolve_indirect_ref()
575 ret = -ENOENT; in resolve_indirect_ref()
579 if (path->search_commit_root) in resolve_indirect_ref()
580 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
582 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
590 * We can often find data backrefs with an offset that is too large in resolve_indirect_ref()
591 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when in resolve_indirect_ref()
592 * subtracting a file's offset with the data offset of its in resolve_indirect_ref()
596 So if we detect such a case we set the search key's offset to zero to in resolve_indirect_ref()
598 * add_all_parents(), otherwise we will miss it because the offset in resolve_indirect_ref()
599 taken from the backref is much larger than the offset of the file in resolve_indirect_ref()
609 search_key.offset >= LLONG_MAX) in resolve_indirect_ref()
610 search_key.offset = 0; in resolve_indirect_ref()
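A concrete instance of the underflow the comment describes, with hypothetical values: cloning the tail of an extent to file offset 0 yields a backref offset of 0 - data_offset, which wraps around in u64 arithmetic:

u64 file_offset = 0;                    /* key.offset of the file extent item */
u64 data_offset = SZ_8M;                /* btrfs_file_extent_offset() */
u64 backref_offset = file_offset - data_offset; /* wraps to 2^64 - 8M */

if (backref_offset >= LLONG_MAX)        /* the clamp applied above */
        backref_offset = 0;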
611 path->lowest_level = level; in resolve_indirect_ref()
619 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
620 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
621 ref->key_for_search.offset); in resolve_indirect_ref()
625 eb = path->nodes[level]; in resolve_indirect_ref()
631 level--; in resolve_indirect_ref()
632 eb = path->nodes[level]; in resolve_indirect_ref()
640 path->lowest_level = 0; in resolve_indirect_ref()
650 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
696 return -ENOMEM; in resolve_indirect_refs()
704 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
708 if (WARN(ref->parent, in resolve_indirect_refs()
710 ret = -EINVAL; in resolve_indirect_refs()
714 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
715 preftrees->indirect.count--; in resolve_indirect_refs()
717 if (ref->count == 0) { in resolve_indirect_refs()
722 if (sc && sc->root_objectid && in resolve_indirect_refs()
723 ref->root_id != sc->root_objectid) { in resolve_indirect_refs()
735 if (err == -ENOENT) { in resolve_indirect_refs()
736 prelim_ref_insert(fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
748 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
749 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
759 ret = -ENOMEM; in resolve_indirect_refs()
763 new_ref->parent = node->val; in resolve_indirect_refs()
764 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
765 prelim_ref_insert(fs_info, &preftrees->direct, in resolve_indirect_refs()
773 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
795 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
798 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
800 rb_erase_cached(node, &tree->root); in add_missing_keys()
802 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
803 BUG_ON(ref->key_for_search.type); in add_missing_keys()
804 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
806 eb = read_tree_block(fs_info, ref->wanted_disk_byte, in add_missing_keys()
807 ref->root_id, 0, ref->level - 1, NULL); in add_missing_keys()
815 return -EIO; in add_missing_keys()
821 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
823 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
827 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
847 spin_lock(&head->lock); in add_delayed_refs()
848 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
851 if (node->seq > seq) in add_delayed_refs()
854 switch (node->action) { in add_delayed_refs()
860 count = node->ref_mod; in add_delayed_refs()
863 count = node->ref_mod * -1; in add_delayed_refs()
868 switch (node->type) { in add_delayed_refs()
874 if (head->extent_op && head->extent_op->update_key) { in add_delayed_refs()
875 btrfs_disk_key_to_cpu(&key, &head->extent_op->key); in add_delayed_refs()
880 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
881 key_ptr, ref->level + 1, in add_delayed_refs()
882 node->bytenr, count, sc, in add_delayed_refs()
892 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, in add_delayed_refs()
893 ref->parent, node->bytenr, count, in add_delayed_refs()
902 key.objectid = ref->objectid; in add_delayed_refs()
904 key.offset = ref->offset; in add_delayed_refs()
922 sc->have_delayed_delete_refs = true; in add_delayed_refs()
924 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
925 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
935 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, in add_delayed_refs()
936 node->bytenr, count, sc, in add_delayed_refs()
953 spin_unlock(&head->lock); in add_delayed_refs()
981 leaf = path->nodes[0]; in add_inline_refs()
982 slot = path->slots[0]; in add_inline_refs()
1003 *info_level = found_key.offset; in add_inline_refs()
1010 u64 offset; in add_inline_refs() local
1017 return -EUCLEAN; in add_inline_refs()
1019 offset = btrfs_extent_inline_ref_offset(leaf, iref); in add_inline_refs()
1024 *info_level + 1, offset, in add_inline_refs()
1034 ret = add_direct_ref(fs_info, preftrees, 0, offset, in add_inline_refs()
1039 ret = add_indirect_ref(fs_info, preftrees, offset, in add_inline_refs()
1048 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1053 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_inline_refs()
1055 if (sc && sc->inum && key.objectid != sc->inum && in add_inline_refs()
1056 !sc->have_delayed_delete_refs) { in add_inline_refs()
1081 * add all non-inline backrefs for bytenr to the list
1090 struct btrfs_fs_info *fs_info = extent_root->fs_info; in add_keyed_refs()
1105 slot = path->slots[0]; in add_keyed_refs()
1106 leaf = path->nodes[0]; in add_keyed_refs()
1120 info_level + 1, key.offset, in add_keyed_refs()
1132 key.offset, bytenr, count, in add_keyed_refs()
1138 ret = add_indirect_ref(fs_info, preftrees, key.offset, in add_keyed_refs()
1154 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_keyed_refs()
1156 if (sc && sc->inum && key.objectid != sc->inum && in add_keyed_refs()
1157 !sc->have_delayed_delete_refs) { in add_keyed_refs()
1190 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1224 key.offset = (u64)-1; in find_parent_nodes()
1232 return -ENOMEM; in find_parent_nodes()
1234 path->search_commit_root = 1; in find_parent_nodes()
1235 path->skip_locking = 1; in find_parent_nodes()
1239 path->skip_locking = 1; in find_parent_nodes()
1250 ret = -EUCLEAN; in find_parent_nodes()
1254 if (trans && likely(trans->type != __TRANS_DUMMY) && in find_parent_nodes()
1262 delayed_refs = &trans->transaction->delayed_refs; in find_parent_nodes()
1263 spin_lock(&delayed_refs->lock); in find_parent_nodes()
1266 if (!mutex_trylock(&head->mutex)) { in find_parent_nodes()
1267 refcount_inc(&head->refs); in find_parent_nodes()
1268 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1276 mutex_lock(&head->mutex); in find_parent_nodes()
1277 mutex_unlock(&head->mutex); in find_parent_nodes()
1281 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1284 mutex_unlock(&head->mutex); in find_parent_nodes()
1288 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1292 if (path->slots[0]) { in find_parent_nodes()
1296 path->slots[0]--; in find_parent_nodes()
1297 leaf = path->nodes[0]; in find_parent_nodes()
1298 slot = path->slots[0]; in find_parent_nodes()
1316 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0); in find_parent_nodes()
1339 node = rb_next(&ref->rbnode); in find_parent_nodes()
1341 * ref->count < 0 can happen here if there are delayed in find_parent_nodes()
1342 * refs with a node->action of BTRFS_DROP_DELAYED_REF. in find_parent_nodes()
1348 * and would retain their original ref->count < 0. in find_parent_nodes()
1350 if (roots && ref->count && ref->root_id && ref->parent == 0) { in find_parent_nodes()
1351 if (sc && sc->root_objectid && in find_parent_nodes()
1352 ref->root_id != sc->root_objectid) { in find_parent_nodes()
1358 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS); in find_parent_nodes()
1362 if (ref->count && ref->parent) { in find_parent_nodes()
1363 if (extent_item_pos && !ref->inode_list && in find_parent_nodes()
1364 ref->level == 0) { in find_parent_nodes()
1367 eb = read_tree_block(fs_info, ref->parent, 0, in find_parent_nodes()
1368 0, ref->level, NULL); in find_parent_nodes()
1375 ret = -EIO; in find_parent_nodes()
1379 if (!path->skip_locking) in find_parent_nodes()
1383 if (!path->skip_locking) in find_parent_nodes()
1388 ref->inode_list = eie; in find_parent_nodes()
1396 ret = ulist_add_merge_ptr(refs, ref->parent, in find_parent_nodes()
1397 ref->inode_list, in find_parent_nodes()
1412 ret = -EUCLEAN; in find_parent_nodes()
1415 while (eie->next) in find_parent_nodes()
1416 eie = eie->next; in find_parent_nodes()
1417 eie->next = ref->inode_list; in find_parent_nodes()
1424 * use-after-free when our caller uses it or double in find_parent_nodes()
1427 ref->inode_list = NULL; in find_parent_nodes()
1446 * offset. key_list_head will point to a list of corresponding keys (caller must
1461 return -ENOMEM; in btrfs_find_all_leafs()
1465 if (ret < 0 && ret != -ENOENT) { in btrfs_find_all_leafs()
1498 return -ENOMEM; in btrfs_find_all_roots_safe()
1502 return -ENOMEM; in btrfs_find_all_roots_safe()
1509 if (ret < 0 && ret != -ENOENT) { in btrfs_find_all_roots_safe()
1518 bytenr = node->val; in btrfs_find_all_roots_safe()
1534 down_read(&fs_info->commit_root_sem); in btrfs_find_all_roots()
1538 up_read(&fs_info->commit_root_sem); in btrfs_find_all_roots()
1544 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1553 if (!cache->use_cache) in lookup_backref_shared_cache()
1560 * Level -1 is used for the data extent, which is not reliable to cache in lookup_backref_shared_cache()
1567 entry = &cache->entries[level]; in lookup_backref_shared_cache()
1570 if (entry->bytenr != bytenr) in lookup_backref_shared_cache()
1577 if (!entry->is_shared && in lookup_backref_shared_cache()
1578 entry->gen != btrfs_root_last_snapshot(&root->root_item)) in lookup_backref_shared_cache()
1586 if (entry->is_shared && in lookup_backref_shared_cache()
1587 entry->gen != btrfs_get_last_root_drop_gen(root->fs_info)) in lookup_backref_shared_cache()
1590 *is_shared = entry->is_shared; in lookup_backref_shared_cache()
1600 cache->entries[i].is_shared = true; in lookup_backref_shared_cache()
1601 cache->entries[i].gen = entry->gen; in lookup_backref_shared_cache()
1610 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1620 if (!cache->use_cache) in store_backref_shared_cache()
1627 * Level -1 is used for the data extent, which is not reliable to cache in store_backref_shared_cache()
1635 gen = btrfs_get_last_root_drop_gen(root->fs_info); in store_backref_shared_cache()
1637 gen = btrfs_root_last_snapshot(&root->root_item); in store_backref_shared_cache()
1639 entry = &cache->entries[level]; in store_backref_shared_cache()
1640 entry->bytenr = bytenr; in store_backref_shared_cache()
1641 entry->is_shared = is_shared; in store_backref_shared_cache()
1642 entry->gen = gen; in store_backref_shared_cache()
1653 entry = &cache->entries[i]; in store_backref_shared_cache()
1654 entry->is_shared = is_shared; in store_backref_shared_cache()
1655 entry->gen = gen; in store_backref_shared_cache()
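For reference, the cache these two helpers manage is declared along these lines in backref.h of this series (a sketch; the comments are added here):

struct btrfs_backref_shared_cache_entry {
        u64 bytenr;
        u64 gen;        /* snapshot or drop generation to validate against */
        bool is_shared;
};

struct btrfs_backref_shared_cache {
        /* One slot per tree level; level -1 (the data extent) is never cached. */
        struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL];
        bool use_cache;
};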
1688 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_is_data_extent_shared()
1695 .root_objectid = root->root_key.objectid, in btrfs_is_data_extent_shared()
1707 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { in btrfs_is_data_extent_shared()
1712 down_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
1717 /* -1 means we are in the bytenr of the data extent. */ in btrfs_is_data_extent_shared()
1718 level = -1; in btrfs_is_data_extent_shared()
1720 cache->use_cache = true; in btrfs_is_data_extent_shared()
1735 if (ret < 0 && ret != -ENOENT) in btrfs_is_data_extent_shared()
1746 if (level == -1 && in btrfs_is_data_extent_shared()
1747 extent_gen > btrfs_root_last_snapshot(&root->root_item)) in btrfs_is_data_extent_shared()
1753 * with a count > 1 for the same offset, which means there are 2 in btrfs_is_data_extent_shared()
1754 * (or more) file extent items that point to the data extent - in btrfs_is_data_extent_shared()
1765 if (level == -1 && tmp->nnodes > 1) in btrfs_is_data_extent_shared()
1766 cache->use_cache = false; in btrfs_is_data_extent_shared()
1774 bytenr = node->val; in btrfs_is_data_extent_shared()
1791 up_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
1813 key.offset = start_off; in btrfs_find_one_extref()
1820 leaf = path->nodes[0]; in btrfs_find_one_extref()
1821 slot = path->slots[0]; in btrfs_find_one_extref()
1824 * If the item at offset is not found, in btrfs_find_one_extref()
1835 ret = -ENOENT; in btrfs_find_one_extref()
1849 ret = -ENOENT; in btrfs_find_one_extref()
1856 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_find_one_extref()
1860 *found_off = found_key.offset; in btrfs_find_one_extref()
1870 * 0-terminated. the path is only given within the current file system.
1889 s64 bytes_left = ((s64)size) - 1; in btrfs_ref_to_path()
1898 bytes_left -= name_len; in btrfs_ref_to_path()
1903 if (!path->skip_locking) in btrfs_ref_to_path()
1910 ret = -ENOENT; in btrfs_ref_to_path()
1914 next_inum = found_key.offset; in btrfs_ref_to_path()
1920 slot = path->slots[0]; in btrfs_ref_to_path()
1921 eb = path->nodes[0]; in btrfs_ref_to_path()
1924 path->nodes[0] = NULL; in btrfs_ref_to_path()
1925 path->locks[0] = 0; in btrfs_ref_to_path()
1934 --bytes_left; in btrfs_ref_to_path()
1970 key.offset = (u64)-1; in extent_from_logical()
1979 ret = -ENOENT; in extent_from_logical()
1982 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); in extent_from_logical()
1983 if (found_key->type == BTRFS_METADATA_ITEM_KEY) in extent_from_logical()
1984 size = fs_info->nodesize; in extent_from_logical()
1985 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) in extent_from_logical()
1986 size = found_key->offset; in extent_from_logical()
1988 if (found_key->objectid > logical || in extent_from_logical()
1989 found_key->objectid + size <= logical) { in extent_from_logical()
1992 return -ENOENT; in extent_from_logical()
1995 eb = path->nodes[0]; in extent_from_logical()
1996 item_size = btrfs_item_size(eb, path->slots[0]); in extent_from_logical()
1999 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in extent_from_logical()
2004 logical, logical - found_key->objectid, found_key->objectid, in extent_from_logical()
2005 found_key->offset, flags, item_size); in extent_from_logical()
2018 return -EIO; in extent_from_logical()
2045 if (key->type == BTRFS_METADATA_ITEM_KEY) { in get_extent_inline_ref()
2050 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); in get_extent_inline_ref()
2060 return -ENOENT; in get_extent_inline_ref()
2068 return -EUCLEAN; in get_extent_inline_ref()
2093 if (*ptr == (unsigned long)-1) in tree_backref_for_extent()
2113 if (key->type == BTRFS_EXTENT_ITEM_KEY) { in tree_backref_for_extent()
2119 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); in tree_backref_for_extent()
2120 *out_level = (u8)key->offset; in tree_backref_for_extent()
2124 *ptr = (unsigned long)-1; in tree_backref_for_extent()
2137 for (eie = inode_list; eie; eie = eie->next) { in iterate_leaf_refs()
2140 extent_item_objectid, eie->inum, in iterate_leaf_refs()
2141 eie->offset, root); in iterate_leaf_refs()
2142 ret = iterate(eie->inum, eie->offset, root, ctx); in iterate_leaf_refs()
2157 * when the iterator function returns a non-zero value, iteration stops.
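The iterate callback has the same shape as build_ino_list() below. A minimal sketch of a custom one (hypothetical names), where a non-zero return stops the walk early:

static int count_refs(u64 inum, u64 offset, u64 root, void *ctx)
{
        u64 *nr_refs = ctx;

        (*nr_refs)++;
        return *nr_refs >= 1024 ? 1 : 0;        /* stop after 1024 hits */
}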
2179 trans = btrfs_attach_transaction(fs_info->tree_root); in iterate_extent_inodes()
2181 if (PTR_ERR(trans) != -ENOENT && in iterate_extent_inodes()
2182 PTR_ERR(trans) != -EROFS) in iterate_extent_inodes()
2191 down_read(&fs_info->commit_root_sem); in iterate_extent_inodes()
2201 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val, in iterate_extent_inodes()
2210 root_node->val, ref_node->val, in iterate_extent_inodes()
2211 ref_node->aux); in iterate_extent_inodes()
2214 (uintptr_t)ref_node->aux, in iterate_extent_inodes()
2215 root_node->val, in iterate_extent_inodes()
2228 up_read(&fs_info->commit_root_sem); in iterate_extent_inodes()
2234 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) in build_ino_list() argument
2239 if (inodes->bytes_left >= c) { in build_ino_list()
2240 inodes->bytes_left -= c; in build_ino_list()
2241 inodes->val[inodes->elem_cnt] = inum; in build_ino_list()
2242 inodes->val[inodes->elem_cnt + 1] = offset; in build_ino_list()
2243 inodes->val[inodes->elem_cnt + 2] = root; in build_ino_list()
2244 inodes->elem_cnt += 3; in build_ino_list()
2246 inodes->bytes_missing += c - inodes->bytes_left; in build_ino_list()
2247 inodes->bytes_left = 0; in build_ino_list()
2248 inodes->elem_missed += 3; in build_ino_list()
2262 int search_commit_root = path->search_commit_root; in iterate_inodes_from_logical()
2269 return -EINVAL; in iterate_inodes_from_logical()
2271 extent_item_pos = logical - found_key.objectid; in iterate_inodes_from_logical()
2291 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_refs()
2292 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_refs()
2305 ret = found ? 0 : -ENOENT; in iterate_inode_refs()
2310 parent = found_key.offset; in iterate_inode_refs()
2311 slot = path->slots[0]; in iterate_inode_refs()
2312 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_refs()
2314 ret = -ENOMEM; in iterate_inode_refs()
2324 btrfs_debug(fs_root->fs_info, in iterate_inode_refs()
2325 "following ref at offset %u for inode %llu in tree %llu", in iterate_inode_refs()
2327 fs_root->root_key.objectid); in iterate_inode_refs()
2347 u64 offset = 0; in iterate_inode_extrefs() local
2350 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_extrefs()
2351 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_extrefs()
2359 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, in iterate_inode_extrefs()
2360 &offset); in iterate_inode_extrefs()
2364 ret = found ? 0 : -ENOENT; in iterate_inode_extrefs()
2369 slot = path->slots[0]; in iterate_inode_extrefs()
2370 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_extrefs()
2372 ret = -ENOMEM; in iterate_inode_extrefs()
2388 (unsigned long)&extref->name, eb, ipath); in iterate_inode_extrefs()
2397 offset++; in iterate_inode_extrefs()
2414 int i = ipath->fspath->elem_cnt; in inode_to_path()
2418 bytes_left = ipath->fspath->bytes_left > s_ptr ? in inode_to_path()
2419 ipath->fspath->bytes_left - s_ptr : 0; in inode_to_path()
2421 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; in inode_to_path()
2422 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, in inode_to_path()
2428 ipath->fspath->val[i] = (u64)(unsigned long)fspath; in inode_to_path()
2429 ++ipath->fspath->elem_cnt; in inode_to_path()
2430 ipath->fspath->bytes_left = fspath - fspath_min; in inode_to_path()
2432 ++ipath->fspath->elem_missed; in inode_to_path()
2433 ipath->fspath->bytes_missing += fspath_min - fspath; in inode_to_path()
2434 ipath->fspath->bytes_left = 0; in inode_to_path()
2442 it has been created large enough. each path is zero-terminated and accessed
2443 * from ipath->fspath->val[i].
2444 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2445 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2446 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2447 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2458 else if (ret != -ENOENT) in paths_from_inode()
2462 if (ret == -ENOENT && found_refs) in paths_from_inode()
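A hedged sketch of the calling sequence, modeled on what the INO_PATHS ioctl does with these helpers (error handling trimmed; note that free_ipath() does not free the btrfs_path):

struct btrfs_path *path = btrfs_alloc_path();
struct inode_fs_paths *ipath;
int i, ret;

if (!path)
        return -ENOMEM;
ipath = init_ipath(4096, fs_root, path);        /* 4096: space for path data */
if (IS_ERR(ipath)) {
        btrfs_free_path(path);
        return PTR_ERR(ipath);
}
ret = paths_from_inode(inum, ipath);
for (i = 0; i < ipath->fspath->elem_cnt; i++)
        pr_info("path: %s\n",
                (char *)(unsigned long)ipath->fspath->val[i]);
free_ipath(ipath);
btrfs_free_path(path);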
2476 return ERR_PTR(-ENOMEM); in init_data_container()
2479 data->bytes_left = total_bytes - sizeof(*data); in init_data_container()
2480 data->bytes_missing = 0; in init_data_container()
2482 data->bytes_missing = sizeof(*data) - total_bytes; in init_data_container()
2483 data->bytes_left = 0; in init_data_container()
2486 data->elem_cnt = 0; in init_data_container()
2487 data->elem_missed = 0; in init_data_container()
2495 * information will be total_bytes - sizeof(struct inode_fs_paths).
2511 return ERR_PTR(-ENOMEM); in init_ipath()
2514 ifp->btrfs_path = path; in init_ipath()
2515 ifp->fspath = fspath; in init_ipath()
2516 ifp->fs_root = fs_root; in init_ipath()
2525 kvfree(ipath->fspath); in free_ipath()
2538 ret->path = btrfs_alloc_path(); in btrfs_backref_iter_alloc()
2539 if (!ret->path) { in btrfs_backref_iter_alloc()
2545 ret->path->search_commit_root = 1; in btrfs_backref_iter_alloc()
2546 ret->path->skip_locking = 1; in btrfs_backref_iter_alloc()
2547 ret->fs_info = fs_info; in btrfs_backref_iter_alloc()
2554 struct btrfs_fs_info *fs_info = iter->fs_info; in btrfs_backref_iter_start()
2556 struct btrfs_path *path = iter->path; in btrfs_backref_iter_start()
2563 key.offset = (u64)-1; in btrfs_backref_iter_start()
2564 iter->bytenr = bytenr; in btrfs_backref_iter_start()
2570 ret = -EUCLEAN; in btrfs_backref_iter_start()
2573 if (path->slots[0] == 0) { in btrfs_backref_iter_start()
2575 ret = -EUCLEAN; in btrfs_backref_iter_start()
2578 path->slots[0]--; in btrfs_backref_iter_start()
2580 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in btrfs_backref_iter_start()
2583 ret = -ENOENT; in btrfs_backref_iter_start()
2586 memcpy(&iter->cur_key, &key, sizeof(key)); in btrfs_backref_iter_start()
2587 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2588 path->slots[0]); in btrfs_backref_iter_start()
2589 iter->end_ptr = (u32)(iter->item_ptr + in btrfs_backref_iter_start()
2590 btrfs_item_size(path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2591 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_backref_iter_start()
2597 * This is an extra precaution for non skinny-metadata, where in btrfs_backref_iter_start()
2601 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { in btrfs_backref_iter_start()
2602 ret = -ENOTSUPP; in btrfs_backref_iter_start()
2605 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); in btrfs_backref_iter_start()
2608 if (iter->cur_ptr >= iter->end_ptr) { in btrfs_backref_iter_start()
2613 ret = -ENOENT; in btrfs_backref_iter_start()
2619 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, in btrfs_backref_iter_start()
2620 path->slots[0]); in btrfs_backref_iter_start()
2621 if (iter->cur_key.objectid != bytenr || in btrfs_backref_iter_start()
2622 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && in btrfs_backref_iter_start()
2623 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { in btrfs_backref_iter_start()
2624 ret = -ENOENT; in btrfs_backref_iter_start()
2627 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2628 path->slots[0]); in btrfs_backref_iter_start()
2629 iter->item_ptr = iter->cur_ptr; in btrfs_backref_iter_start()
2630 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size( in btrfs_backref_iter_start()
2631 path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2644 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2654 struct btrfs_path *path = iter->path; in btrfs_backref_iter_next()
2661 ASSERT(iter->cur_ptr < iter->end_ptr); in btrfs_backref_iter_next()
2671 ((unsigned long)iter->cur_ptr); in btrfs_backref_iter_next()
2676 iter->cur_ptr += size; in btrfs_backref_iter_next()
2677 if (iter->cur_ptr < iter->end_ptr) in btrfs_backref_iter_next()
2684 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); in btrfs_backref_iter_next()
2685 ret = btrfs_next_item(extent_root, iter->path); in btrfs_backref_iter_next()
2689 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); in btrfs_backref_iter_next()
2690 if (iter->cur_key.objectid != iter->bytenr || in btrfs_backref_iter_next()
2691 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && in btrfs_backref_iter_next()
2692 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) in btrfs_backref_iter_next()
2694 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_next()
2695 path->slots[0]); in btrfs_backref_iter_next()
2696 iter->cur_ptr = iter->item_ptr; in btrfs_backref_iter_next()
2697 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0], in btrfs_backref_iter_next()
2698 path->slots[0]); in btrfs_backref_iter_next()
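Taken together, the iterator is meant to be driven like this (a sketch; assuming the GFP-flag-taking btrfs_backref_iter_alloc() of this series, and btrfs_backref_iter_free() releasing the embedded path):

struct btrfs_backref_iter *iter;
int ret;

iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
if (!iter)
        return -ENOMEM;
ret = btrfs_backref_iter_start(iter, bytenr);
while (ret == 0) {
        /* iter->cur_key distinguishes inline from keyed backrefs */
        ret = btrfs_backref_iter_next(iter);    /* >0 means no more refs */
}
btrfs_backref_iter_free(iter);
return ret < 0 ? ret : 0;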
2707 cache->rb_root = RB_ROOT; in btrfs_backref_init_cache()
2709 INIT_LIST_HEAD(&cache->pending[i]); in btrfs_backref_init_cache()
2710 INIT_LIST_HEAD(&cache->changed); in btrfs_backref_init_cache()
2711 INIT_LIST_HEAD(&cache->detached); in btrfs_backref_init_cache()
2712 INIT_LIST_HEAD(&cache->leaves); in btrfs_backref_init_cache()
2713 INIT_LIST_HEAD(&cache->pending_edge); in btrfs_backref_init_cache()
2714 INIT_LIST_HEAD(&cache->useless_node); in btrfs_backref_init_cache()
2715 cache->fs_info = fs_info; in btrfs_backref_init_cache()
2716 cache->is_reloc = is_reloc; in btrfs_backref_init_cache()
2729 INIT_LIST_HEAD(&node->list); in btrfs_backref_alloc_node()
2730 INIT_LIST_HEAD(&node->upper); in btrfs_backref_alloc_node()
2731 INIT_LIST_HEAD(&node->lower); in btrfs_backref_alloc_node()
2732 RB_CLEAR_NODE(&node->rb_node); in btrfs_backref_alloc_node()
2733 cache->nr_nodes++; in btrfs_backref_alloc_node()
2734 node->level = level; in btrfs_backref_alloc_node()
2735 node->bytenr = bytenr; in btrfs_backref_alloc_node()
2743 struct btrfs_backref_edge *edge; in btrfs_backref_alloc_edge() local
2745 edge = kzalloc(sizeof(*edge), GFP_NOFS); in btrfs_backref_alloc_edge()
2746 if (edge) in btrfs_backref_alloc_edge()
2747 cache->nr_edges++; in btrfs_backref_alloc_edge()
2748 return edge; in btrfs_backref_alloc_edge()
2762 struct btrfs_backref_edge *edge; in btrfs_backref_cleanup_node() local
2767 BUG_ON(!node->lowest && !node->detached); in btrfs_backref_cleanup_node()
2768 while (!list_empty(&node->upper)) { in btrfs_backref_cleanup_node()
2769 edge = list_entry(node->upper.next, struct btrfs_backref_edge, in btrfs_backref_cleanup_node()
2771 upper = edge->node[UPPER]; in btrfs_backref_cleanup_node()
2772 list_del(&edge->list[LOWER]); in btrfs_backref_cleanup_node()
2773 list_del(&edge->list[UPPER]); in btrfs_backref_cleanup_node()
2774 btrfs_backref_free_edge(cache, edge); in btrfs_backref_cleanup_node()
2780 if (list_empty(&upper->lower)) { in btrfs_backref_cleanup_node()
2781 list_add_tail(&upper->lower, &cache->leaves); in btrfs_backref_cleanup_node()
2782 upper->lowest = 1; in btrfs_backref_cleanup_node()
2797 while (!list_empty(&cache->detached)) { in btrfs_backref_release_cache()
2798 node = list_entry(cache->detached.next, in btrfs_backref_release_cache()
2803 while (!list_empty(&cache->leaves)) { in btrfs_backref_release_cache()
2804 node = list_entry(cache->leaves.next, in btrfs_backref_release_cache()
2809 cache->last_trans = 0; in btrfs_backref_release_cache()
2812 ASSERT(list_empty(&cache->pending[i])); in btrfs_backref_release_cache()
2813 ASSERT(list_empty(&cache->pending_edge)); in btrfs_backref_release_cache()
2814 ASSERT(list_empty(&cache->useless_node)); in btrfs_backref_release_cache()
2815 ASSERT(list_empty(&cache->changed)); in btrfs_backref_release_cache()
2816 ASSERT(list_empty(&cache->detached)); in btrfs_backref_release_cache()
2817 ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); in btrfs_backref_release_cache()
2818 ASSERT(!cache->nr_nodes); in btrfs_backref_release_cache()
2819 ASSERT(!cache->nr_edges); in btrfs_backref_release_cache()
2831 * type is btrfs_inline_ref_type, offset is
2838 struct btrfs_backref_edge *edge; in handle_direct_tree_backref() local
2842 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); in handle_direct_tree_backref()
2845 if (ref_key->objectid == ref_key->offset) { in handle_direct_tree_backref()
2848 cur->is_reloc_root = 1; in handle_direct_tree_backref()
2850 if (cache->is_reloc) { in handle_direct_tree_backref()
2851 root = find_reloc_root(cache->fs_info, cur->bytenr); in handle_direct_tree_backref()
2853 return -ENOENT; in handle_direct_tree_backref()
2854 cur->root = root; in handle_direct_tree_backref()
2860 list_add(&cur->list, &cache->useless_node); in handle_direct_tree_backref()
2865 edge = btrfs_backref_alloc_edge(cache); in handle_direct_tree_backref()
2866 if (!edge) in handle_direct_tree_backref()
2867 return -ENOMEM; in handle_direct_tree_backref()
2869 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); in handle_direct_tree_backref()
2872 upper = btrfs_backref_alloc_node(cache, ref_key->offset, in handle_direct_tree_backref()
2873 cur->level + 1); in handle_direct_tree_backref()
2875 btrfs_backref_free_edge(cache, edge); in handle_direct_tree_backref()
2876 return -ENOMEM; in handle_direct_tree_backref()
2883 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in handle_direct_tree_backref()
2887 ASSERT(upper->checked); in handle_direct_tree_backref()
2888 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_direct_tree_backref()
2890 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); in handle_direct_tree_backref()
2912 struct btrfs_fs_info *fs_info = cache->fs_info; in handle_indirect_tree_backref()
2915 struct btrfs_backref_edge *edge; in handle_indirect_tree_backref() local
2923 root = btrfs_get_fs_root(fs_info, ref_key->offset, false); in handle_indirect_tree_backref()
2926 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in handle_indirect_tree_backref()
2927 cur->cowonly = 1; in handle_indirect_tree_backref()
2929 if (btrfs_root_level(&root->root_item) == cur->level) { in handle_indirect_tree_backref()
2931 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); in handle_indirect_tree_backref()
2939 * completely relying on direct backref (key->offset is parent in handle_indirect_tree_backref()
2942 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { in handle_indirect_tree_backref()
2944 list_add(&cur->list, &cache->useless_node); in handle_indirect_tree_backref()
2946 cur->root = root; in handle_indirect_tree_backref()
2951 level = cur->level + 1; in handle_indirect_tree_backref()
2954 path->search_commit_root = 1; in handle_indirect_tree_backref()
2955 path->skip_locking = 1; in handle_indirect_tree_backref()
2956 path->lowest_level = level; in handle_indirect_tree_backref()
2958 path->lowest_level = 0; in handle_indirect_tree_backref()
2963 if (ret > 0 && path->slots[level] > 0) in handle_indirect_tree_backref()
2964 path->slots[level]--; in handle_indirect_tree_backref()
2966 eb = path->nodes[level]; in handle_indirect_tree_backref()
2967 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { in handle_indirect_tree_backref()
2970 cur->bytenr, level - 1, root->root_key.objectid, in handle_indirect_tree_backref()
2971 tree_key->objectid, tree_key->type, tree_key->offset); in handle_indirect_tree_backref()
2973 ret = -ENOENT; in handle_indirect_tree_backref()
2980 if (!path->nodes[level]) { in handle_indirect_tree_backref()
2981 ASSERT(btrfs_root_bytenr(&root->root_item) == in handle_indirect_tree_backref()
2982 lower->bytenr); in handle_indirect_tree_backref()
2985 cache->is_reloc) { in handle_indirect_tree_backref()
2987 list_add(&lower->list, &cache->useless_node); in handle_indirect_tree_backref()
2989 lower->root = root; in handle_indirect_tree_backref()
2994 edge = btrfs_backref_alloc_edge(cache); in handle_indirect_tree_backref()
2995 if (!edge) { in handle_indirect_tree_backref()
2997 ret = -ENOMEM; in handle_indirect_tree_backref()
3001 eb = path->nodes[level]; in handle_indirect_tree_backref()
3002 rb_node = rb_simple_search(&cache->rb_root, eb->start); in handle_indirect_tree_backref()
3004 upper = btrfs_backref_alloc_node(cache, eb->start, in handle_indirect_tree_backref()
3005 lower->level + 1); in handle_indirect_tree_backref()
3008 btrfs_backref_free_edge(cache, edge); in handle_indirect_tree_backref()
3009 ret = -ENOMEM; in handle_indirect_tree_backref()
3012 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3013 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in handle_indirect_tree_backref()
3014 upper->cowonly = 1; in handle_indirect_tree_backref()
3021 upper->checked = 0; in handle_indirect_tree_backref()
3023 upper->checked = 1; in handle_indirect_tree_backref()
3030 if (!upper->checked && need_check) { in handle_indirect_tree_backref()
3032 list_add_tail(&edge->list[UPPER], in handle_indirect_tree_backref()
3033 &cache->pending_edge); in handle_indirect_tree_backref()
3035 if (upper->checked) in handle_indirect_tree_backref()
3037 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3042 ASSERT(upper->checked); in handle_indirect_tree_backref()
3043 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3044 if (!upper->owner) in handle_indirect_tree_backref()
3045 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3047 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); in handle_indirect_tree_backref()
3065 * links aren't yet bi-directional. Needs to finish such links.
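The intended driving loop, modeled on build_backref_tree() in relocation.c (a sketch with error paths trimmed; cache, path, iter, node_key and node come from that caller): keep draining cache->pending_edge breadth-first, then fix up the one-directional links:

struct btrfs_backref_node *cur = node;
struct btrfs_backref_edge *edge;
int ret;

do {
        ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
        if (ret < 0)
                goto out;
        edge = list_first_entry_or_null(&cache->pending_edge,
                        struct btrfs_backref_edge, list[UPPER]);
        if (edge) {
                list_del_init(&edge->list[UPPER]);
                cur = edge->node[UPPER];        /* process the next upper block */
        }
} while (edge);

/* Make all the newly added edges bi-directional */
ret = btrfs_backref_finish_upper_links(cache, node);
out:
if (ret < 0)
        btrfs_backref_error_cleanup(cache, node);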
3078 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_backref_add_tree_node()
3079 struct btrfs_backref_edge *edge; in btrfs_backref_add_tree_node() local
3083 ret = btrfs_backref_iter_start(iter, cur->bytenr); in btrfs_backref_add_tree_node()
3096 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3100 WARN_ON(cur->checked); in btrfs_backref_add_tree_node()
3101 if (!list_empty(&cur->upper)) { in btrfs_backref_add_tree_node()
3106 ASSERT(list_is_singular(&cur->upper)); in btrfs_backref_add_tree_node()
3107 edge = list_entry(cur->upper.next, struct btrfs_backref_edge, in btrfs_backref_add_tree_node()
3109 ASSERT(list_empty(&edge->list[UPPER])); in btrfs_backref_add_tree_node()
3110 exist = edge->node[UPPER]; in btrfs_backref_add_tree_node()
3115 if (!exist->checked) in btrfs_backref_add_tree_node()
3116 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in btrfs_backref_add_tree_node()
3129 key.objectid = iter->bytenr; in btrfs_backref_add_tree_node()
3135 ((unsigned long)iter->cur_ptr); in btrfs_backref_add_tree_node()
3139 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3143 key.offset = btrfs_extent_inline_ref_offset(eb, iref); in btrfs_backref_add_tree_node()
3145 key.type = iter->cur_key.type; in btrfs_backref_add_tree_node()
3146 key.offset = iter->cur_key.offset; in btrfs_backref_add_tree_node()
3155 exist->owner == key.offset) || in btrfs_backref_add_tree_node()
3157 exist->bytenr == key.offset))) { in btrfs_backref_add_tree_node()
3162 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ in btrfs_backref_add_tree_node()
3169 ret = -EINVAL; in btrfs_backref_add_tree_node()
3178 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset in btrfs_backref_add_tree_node()
3188 cur->checked = 1; in btrfs_backref_add_tree_node()
3201 struct list_head *useless_node = &cache->useless_node; in btrfs_backref_finish_upper_links()
3202 struct btrfs_backref_edge *edge; in btrfs_backref_finish_upper_links() local
3206 ASSERT(start->checked); in btrfs_backref_finish_upper_links()
3208 /* Insert this node to cache if it's not COW-only */ in btrfs_backref_finish_upper_links()
3209 if (!start->cowonly) { in btrfs_backref_finish_upper_links()
3210 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, in btrfs_backref_finish_upper_links()
3211 &start->rb_node); in btrfs_backref_finish_upper_links()
3213 btrfs_backref_panic(cache->fs_info, start->bytenr, in btrfs_backref_finish_upper_links()
3214 -EEXIST); in btrfs_backref_finish_upper_links()
3215 list_add_tail(&start->lower, &cache->leaves); in btrfs_backref_finish_upper_links()
3223 list_for_each_entry(edge, &start->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3224 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3230 edge = list_first_entry(&pending_edge, in btrfs_backref_finish_upper_links()
3232 list_del_init(&edge->list[UPPER]); in btrfs_backref_finish_upper_links()
3233 upper = edge->node[UPPER]; in btrfs_backref_finish_upper_links()
3234 lower = edge->node[LOWER]; in btrfs_backref_finish_upper_links()
3237 if (upper->detached) { in btrfs_backref_finish_upper_links()
3238 list_del(&edge->list[LOWER]); in btrfs_backref_finish_upper_links()
3239 btrfs_backref_free_edge(cache, edge); in btrfs_backref_finish_upper_links()
3242 if (list_empty(&lower->upper)) in btrfs_backref_finish_upper_links()
3243 list_add(&lower->list, useless_node); in btrfs_backref_finish_upper_links()
3250 * So if we have upper->rb_node populated, this means a cache in btrfs_backref_finish_upper_links()
3251 * hit. We only need to link the edge, as @upper and all its in btrfs_backref_finish_upper_links()
3254 if (!RB_EMPTY_NODE(&upper->rb_node)) { in btrfs_backref_finish_upper_links()
3255 if (upper->lowest) { in btrfs_backref_finish_upper_links()
3256 list_del_init(&upper->lower); in btrfs_backref_finish_upper_links()
3257 upper->lowest = 0; in btrfs_backref_finish_upper_links()
3260 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3265 if (!upper->checked) { in btrfs_backref_finish_upper_links()
3267 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3270 /* Sanity check, COW-only node has non-COW-only parent */ in btrfs_backref_finish_upper_links()
3271 if (start->cowonly != upper->cowonly) { in btrfs_backref_finish_upper_links()
3273 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3276 /* Only cache non-COW-only (subvolume trees) tree blocks */ in btrfs_backref_finish_upper_links()
3277 if (!upper->cowonly) { in btrfs_backref_finish_upper_links()
3278 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, in btrfs_backref_finish_upper_links()
3279 &upper->rb_node); in btrfs_backref_finish_upper_links()
3281 btrfs_backref_panic(cache->fs_info, in btrfs_backref_finish_upper_links()
3282 upper->bytenr, -EEXIST); in btrfs_backref_finish_upper_links()
3283 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3287 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3293 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3294 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3304 struct btrfs_backref_edge *edge; in btrfs_backref_error_cleanup() local
3306 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3307 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3309 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3311 while (!list_empty(&cache->pending_edge)) { in btrfs_backref_error_cleanup()
3312 edge = list_first_entry(&cache->pending_edge, in btrfs_backref_error_cleanup()
3314 list_del(&edge->list[UPPER]); in btrfs_backref_error_cleanup()
3315 list_del(&edge->list[LOWER]); in btrfs_backref_error_cleanup()
3316 lower = edge->node[LOWER]; in btrfs_backref_error_cleanup()
3317 upper = edge->node[UPPER]; in btrfs_backref_error_cleanup()
3318 btrfs_backref_free_edge(cache, edge); in btrfs_backref_error_cleanup()
3324 if (list_empty(&lower->upper) && in btrfs_backref_error_cleanup()
3325 RB_EMPTY_NODE(&lower->rb_node)) in btrfs_backref_error_cleanup()
3326 list_add(&lower->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3328 if (!RB_EMPTY_NODE(&upper->rb_node)) in btrfs_backref_error_cleanup()
3332 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_error_cleanup()
3333 list_add_tail(&edge->list[UPPER], in btrfs_backref_error_cleanup()
3334 &cache->pending_edge); in btrfs_backref_error_cleanup()
3335 if (list_empty(&upper->upper)) in btrfs_backref_error_cleanup()
3336 list_add(&upper->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3339 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3340 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3342 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3349 ASSERT(list_empty(&cache->useless_node) && in btrfs_backref_error_cleanup()
3350 list_empty(&cache->pending_edge)); in btrfs_backref_error_cleanup()