Lines matching the query +full:lower +full:- +full:case in fs/btrfs/backref.c. Each entry gives the file line number, the matched source line and, where the index recorded it, the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
51 offset = extent_item_pos - data_offset; in check_extent_in_eb()
56 return -ENOMEM; in check_extent_in_eb()
58 e->next = *eie; in check_extent_in_eb()
59 e->inum = key->objectid; in check_extent_in_eb()
60 e->offset = key->offset + offset; in check_extent_in_eb()
71 eie_next = eie->next; in free_inode_elem_list()
133 * ref->count >0:
134 * - incremented when a ref->count transitions to >0
135 * - decremented when a ref->count transitions to <1
145 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
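The two transition rules above fully determine the bookkeeping. A minimal sketch of such an update helper, consistent with the update_share_count() fragments further down (lines 215/217) but not claimed to be the kernel's verbatim code:

    static void sketch_update_share_count(struct share_check *sc,
                                          int oldcount, int newcount)
    {
            if (!sc)
                    return;

            if (oldcount > 0 && newcount < 1)
                    sc->share_count--;      /* a ref->count transitioned to <1 */
            else if (oldcount < 1 && newcount > 0)
                    sc->share_count++;      /* a ref->count transitioned to >0 */
    }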
158 return -ENOMEM; in btrfs_prelim_ref_init()
174 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
180 if (ref1->level < ref2->level) in prelim_ref_compare()
181 return -1; in prelim_ref_compare()
182 if (ref1->level > ref2->level) in prelim_ref_compare()
184 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
185 return -1; in prelim_ref_compare()
186 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
188 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
189 return -1; in prelim_ref_compare()
190 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
192 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
193 return -1; in prelim_ref_compare()
194 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
196 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
197 return -1; in prelim_ref_compare()
198 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
200 if (ref1->parent < ref2->parent) in prelim_ref_compare()
201 return -1; in prelim_ref_compare()
202 if (ref1->parent > ref2->parent) in prelim_ref_compare()
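As a concrete illustration of this ordering (hypothetical values, not taken from the file): level is compared first, so a lower-level ref sorts as 'lower' regardless of the remaining fields:

    struct prelim_ref a = { .level = 0, .root_id = 100 };
    struct prelim_ref b = { .level = 1, .root_id = 5 };

    ASSERT(prelim_ref_compare(&a, &b) == -1);   /* a orders before b */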
215 sc->share_count--; in update_share_count()
217 sc->share_count++; in update_share_count()
237 root = &preftree->root; in prelim_ref_insert()
238 p = &root->rb_root.rb_node; in prelim_ref_insert()
245 p = &(*p)->rb_left; in prelim_ref_insert()
247 p = &(*p)->rb_right; in prelim_ref_insert()
251 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
253 while (eie && eie->next) in prelim_ref_insert()
254 eie = eie->next; in prelim_ref_insert()
257 ref->inode_list = newref->inode_list; in prelim_ref_insert()
259 eie->next = newref->inode_list; in prelim_ref_insert()
261 preftree->count); in prelim_ref_insert()
263 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
264 * The ref->count is updated to follow any in prelim_ref_insert()
267 update_share_count(sc, ref->count, in prelim_ref_insert()
268 ref->count + newref->count); in prelim_ref_insert()
269 ref->count += newref->count; in prelim_ref_insert()
275 update_share_count(sc, 0, newref->count); in prelim_ref_insert()
276 preftree->count++; in prelim_ref_insert()
277 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
278 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
279 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
291 &preftree->root.rb_root, rbnode) in prelim_release()
294 preftree->root = RB_ROOT_CACHED; in prelim_release()
295 preftree->count = 0; in prelim_release()
300 * - obtaining the parent is the goal
301 * - if you add a key, you must know that it is a correct key
302 * - if you cannot add the parent or a correct key, then we will look into the
309 * --------------------+--------+----------+--------+----------
310 * parent logical | y | - | - | -
311 * key to resolve | - | y | y | y
312 * tree block logical | - | - | - | -
315 * - column 1: we've the parent -> done
316 * - column 2, 3, 4: we use the key to find the parent
322 * --------------------+--------+----------+--------+----------
323 * parent logical | y | - | y | -
324 * key to resolve | - | - | - | y
326 * root for resolving | - | y | y | y
328 * - column 1, 3: we've the parent -> done
329 * - column 2: we take the first key from the block to find the parent
331 * - column 4: we use the key to find the parent
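Not every row of the two tables matched the search; in the full comment the four columns are, left to right, shared tree, indirect tree, shared data and indirect data backrefs. A hedged sketch of how a caller follows these rules, with the add_direct_ref()/add_indirect_ref() signatures inferred from the call sites further down in this listing:

    if (parent) {
            /* Parent known (columns marked 'y' for parent): direct ref, done. */
            ret = add_direct_ref(fs_info, preftrees, level, parent,
                                 wanted_disk_byte, count, sc, GFP_NOFS);
    } else {
            /*
             * Only a key (or not even that): indirect ref. When key is
             * NULL, add_indirect_ref() queues the ref on the
             * indirect_missing_keys tree so a correct key can be read
             * from the block later (see add_missing_keys()).
             */
            ret = add_indirect_ref(fs_info, preftrees, root_id, key, level,
                                   wanted_disk_byte, count, sc, GFP_NOFS);
    }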
349 return -ENOMEM; in add_prelim_ref()
351 ref->root_id = root_id; in add_prelim_ref()
353 ref->key_for_search = *key; in add_prelim_ref()
355 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
357 ref->inode_list = NULL; in add_prelim_ref()
358 ref->level = level; in add_prelim_ref()
359 ref->count = count; in add_prelim_ref()
360 ref->parent = parent; in add_prelim_ref()
361 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
372 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
383 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
386 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
393 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
407 p = &(*p)->rb_left; in is_shared_data_backref()
409 p = &(*p)->rb_right; in is_shared_data_backref()
426 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
430 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
435 eb = path->nodes[level]; in add_all_parents()
436 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
452 eb = path->nodes[0]; in add_all_parents()
453 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
454 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
455 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
462 while (!ret && count < ref->count) { in add_all_parents()
463 eb = path->nodes[0]; in add_all_parents()
464 slot = path->slots[0]; in add_all_parents()
468 if (key.objectid != key_for_search->objectid || in add_all_parents()
478 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
479 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
493 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
506 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
511 while (old->next) in add_all_parents()
512 old = old->next; in add_all_parents()
513 old->next = eie; in add_all_parents()
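The tail of add_all_parents() relies on a dedup-and-splice pattern: ulist_add_merge_ptr() refuses a duplicate parent but hands back the existing aux pointer, so the new extent_inode_elem chain is appended instead of being leaked. Condensed from the fragments above:

    struct extent_inode_elem *old = NULL;

    ret = ulist_add_merge_ptr(parents, eb->start, eie,
                              (void **)&old, GFP_NOFS);
    if (ret < 0)
            return ret;
    if (!ret && old) {
            /* Parent already recorded: splice the new list onto the old. */
            while (old->next)
                    old = old->next;
            old->next = eie;
    }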
545 int level = ref->level; in resolve_indirect_ref()
546 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
556 if (path->search_commit_root) in resolve_indirect_ref()
557 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); in resolve_indirect_ref()
559 root = btrfs_get_fs_root(fs_info, ref->root_id, false); in resolve_indirect_ref()
565 if (!path->search_commit_root && in resolve_indirect_ref()
566 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
567 ret = -ENOENT; in resolve_indirect_ref()
572 ret = -ENOENT; in resolve_indirect_ref()
576 if (path->search_commit_root) in resolve_indirect_ref()
577 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
579 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
593 * So if we detect such a case we set the search key's offset to zero to in resolve_indirect_ref()
608 path->lowest_level = level; in resolve_indirect_ref()
616 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
617 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
618 ref->key_for_search.offset); in resolve_indirect_ref()
622 eb = path->nodes[level]; in resolve_indirect_ref()
628 level--; in resolve_indirect_ref()
629 eb = path->nodes[level]; in resolve_indirect_ref()
637 path->lowest_level = 0; in resolve_indirect_ref()
647 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
681 return -ENOMEM; in resolve_indirect_refs()
689 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
693 if (WARN(ref->parent, in resolve_indirect_refs()
695 ret = -EINVAL; in resolve_indirect_refs()
699 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
700 preftrees->indirect.count--; in resolve_indirect_refs()
702 if (ref->count == 0) { in resolve_indirect_refs()
707 if (sc && sc->root_objectid && in resolve_indirect_refs()
708 ref->root_id != sc->root_objectid) { in resolve_indirect_refs()
720 if (err == -ENOENT) { in resolve_indirect_refs()
721 prelim_ref_insert(fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
733 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
734 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
744 ret = -ENOMEM; in resolve_indirect_refs()
748 new_ref->parent = node->val; in resolve_indirect_refs()
749 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
750 prelim_ref_insert(fs_info, &preftrees->direct, in resolve_indirect_refs()
758 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
776 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
779 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
781 rb_erase_cached(node, &tree->root); in add_missing_keys()
783 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
784 BUG_ON(ref->key_for_search.type); in add_missing_keys()
785 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
787 eb = read_tree_block(fs_info, ref->wanted_disk_byte, in add_missing_keys()
788 ref->root_id, 0, ref->level - 1, NULL); in add_missing_keys()
795 return -EIO; in add_missing_keys()
800 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
802 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
806 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
821 struct btrfs_delayed_extent_op *extent_op = head->extent_op; in add_delayed_refs()
828 if (extent_op && extent_op->update_key) in add_delayed_refs()
829 btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); in add_delayed_refs()
831 spin_lock(&head->lock); in add_delayed_refs()
832 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
835 if (node->seq > seq) in add_delayed_refs()
838 switch (node->action) { in add_delayed_refs()
839 case BTRFS_ADD_DELAYED_EXTENT: in add_delayed_refs()
840 case BTRFS_UPDATE_DELAYED_HEAD: in add_delayed_refs()
843 case BTRFS_ADD_DELAYED_REF: in add_delayed_refs()
844 count = node->ref_mod; in add_delayed_refs()
846 case BTRFS_DROP_DELAYED_REF: in add_delayed_refs()
847 count = node->ref_mod * -1; in add_delayed_refs()
852 switch (node->type) { in add_delayed_refs()
853 case BTRFS_TREE_BLOCK_REF_KEY: { in add_delayed_refs()
858 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
859 &tmp_op_key, ref->level + 1, in add_delayed_refs()
860 node->bytenr, count, sc, in add_delayed_refs()
864 case BTRFS_SHARED_BLOCK_REF_KEY: { in add_delayed_refs()
870 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, in add_delayed_refs()
871 ref->parent, node->bytenr, count, in add_delayed_refs()
875 case BTRFS_EXTENT_DATA_REF_KEY: { in add_delayed_refs()
880 key.objectid = ref->objectid; in add_delayed_refs()
882 key.offset = ref->offset; in add_delayed_refs()
888 if (sc && sc->inum && ref->objectid != sc->inum) { in add_delayed_refs()
893 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
894 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
898 case BTRFS_SHARED_DATA_REF_KEY: { in add_delayed_refs()
904 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, in add_delayed_refs()
905 node->bytenr, count, sc, in add_delayed_refs()
922 spin_unlock(&head->lock); in add_delayed_refs()
950 leaf = path->nodes[0]; in add_inline_refs()
951 slot = path->slots[0]; in add_inline_refs()
986 return -EUCLEAN; in add_inline_refs()
991 case BTRFS_SHARED_BLOCK_REF_KEY: in add_inline_refs()
996 case BTRFS_SHARED_DATA_REF_KEY: { in add_inline_refs()
1007 case BTRFS_TREE_BLOCK_REF_KEY: in add_inline_refs()
1012 case BTRFS_EXTENT_DATA_REF_KEY: { in add_inline_refs()
1017 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1024 if (sc && sc->inum && key.objectid != sc->inum) { in add_inline_refs()
1048 * add all non-inline backrefs for bytenr to the list
1057 struct btrfs_root *extent_root = fs_info->extent_root; in add_keyed_refs()
1072 slot = path->slots[0]; in add_keyed_refs()
1073 leaf = path->nodes[0]; in add_keyed_refs()
1084 case BTRFS_SHARED_BLOCK_REF_KEY: in add_keyed_refs()
1090 case BTRFS_SHARED_DATA_REF_KEY: { in add_keyed_refs()
1103 case BTRFS_TREE_BLOCK_REF_KEY: in add_keyed_refs()
1109 case BTRFS_EXTENT_DATA_REF_KEY: { in add_keyed_refs()
1123 if (sc && sc->inum && key.objectid != sc->inum) { in add_keyed_refs()
1152 * behave much like the trans == NULL case; the only difference is that it will not
1154 * The special case is for qgroup to search roots in commit_transaction().
1156 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1189 key.offset = (u64)-1;
1197 return -ENOMEM;
1199 path->search_commit_root = 1;
1200 path->skip_locking = 1;
1204 path->skip_locking = 1;
1214 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1220 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1229 delayed_refs = &trans->transaction->delayed_refs;
1230 spin_lock(&delayed_refs->lock);
1233 if (!mutex_trylock(&head->mutex)) {
1234 refcount_inc(&head->refs);
1235 spin_unlock(&delayed_refs->lock);
1243 mutex_lock(&head->mutex);
1244 mutex_unlock(&head->mutex);
1248 spin_unlock(&delayed_refs->lock);
1251 mutex_unlock(&head->mutex);
1255 spin_unlock(&delayed_refs->lock);
1259 if (path->slots[0]) {
1263 path->slots[0]--;
1264 leaf = path->nodes[0];
1265 slot = path->slots[0];
1283 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1306 node = rb_next(&ref->rbnode);
1308 * ref->count < 0 can happen here if there are delayed
1309 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1315 * and would retain their original ref->count < 0.
1317 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1318 if (sc && sc->root_objectid &&
1319 ref->root_id != sc->root_objectid) {
1325 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1329 if (ref->count && ref->parent) {
1330 if (extent_item_pos && !ref->inode_list &&
1331 ref->level == 0) {
1334 eb = read_tree_block(fs_info, ref->parent, 0,
1335 0, ref->level, NULL);
1341 ret = -EIO;
1345 if (!path->skip_locking)
1349 if (!path->skip_locking)
1354 ref->inode_list = eie;
1356 ret = ulist_add_merge_ptr(refs, ref->parent,
1357 ref->inode_list,
1367 while (eie->next)
1368 eie = eie->next;
1369 eie->next = ref->inode_list;
1396 if (!node->aux)
1400 node->aux = 0;
1423 return -ENOMEM;
1427 if (ret < 0 && ret != -ENOENT) {
1460 return -ENOMEM;
1464 return -ENOMEM;
1471 if (ret < 0 && ret != -ENOENT) {
1480 bytenr = node->val;
1496 down_read(&fs_info->commit_root_sem);
1500 up_read(&fs_info->commit_root_sem);
1527 struct btrfs_fs_info *fs_info = root->fs_info;
1534 .root_objectid = root->root_key.objectid,
1544 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1549 down_read(&fs_info->commit_root_sem);
1563 if (ret < 0 && ret != -ENOENT)
1569 bytenr = node->val;
1578 up_read(&fs_info->commit_root_sem);
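The share_check initializer in btrfs_check_shared() only partially matched the search (line 1534); reconstructed here as a hedged sketch, with share_count starting at 1 so that extent_is_shared() fires once a second counted reference appears:

    struct share_check shared = {
            .root_objectid = root->root_key.objectid,
            .inum = inum,
            .share_count = 1,       /* >1 means the extent is shared */
    };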
1607 leaf = path->nodes[0];
1608 slot = path->slots[0];
1613 * where it should be inserted. In our case
1615 * next INODE_REF_KEY_V2 item. In the case
1622 ret = -ENOENT;
1636 ret = -ENOENT;
1643 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1657 * 0-terminated. the path is only given within the current file system.
1662 * in case the path buffer would overflow, the pointer is decremented further
1665 * required for the path to fit into the buffer. in that case, the returned
1676 s64 bytes_left = ((s64)size) - 1;
1685 bytes_left -= name_len;
1690 if (!path->skip_locking)
1697 ret = -ENOENT;
1707 slot = path->slots[0];
1708 eb = path->nodes[0];
1711 path->nodes[0] = NULL;
1712 path->locks[0] = 0;
1721 --bytes_left;
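The overflow convention documented above (lines 1662-1665) is checked on the caller side by comparing the returned start pointer against the buffer. A sketch assuming the mainline btrfs_ref_to_path() prototype, whose call site appears later at line 2212:

    char buf[256];
    char *start;

    start = btrfs_ref_to_path(fs_root, path, name_len, name_off,
                              eb, parent, buf, sizeof(buf));
    if (IS_ERR(start))
            return PTR_ERR(start);
    if (start < buf) {
            /* Path did not fit; (buf - start) more bytes were needed. */
            return -ENAMETOOLONG;
    }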
1756 key.offset = (u64)-1;
1758 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1762 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1765 ret = -ENOENT;
1768 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1769 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1770 size = fs_info->nodesize;
1771 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1772 size = found_key->offset;
1774 if (found_key->objectid > logical ||
1775 found_key->objectid + size <= logical) {
1778 return -ENOENT;
1781 eb = path->nodes[0];
1782 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1785 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1790 logical, logical - found_key->objectid, found_key->objectid,
1791 found_key->offset, flags, item_size);
1804 return -EIO;
1831 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1836 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1846 return -ENOENT;
1854 return -EUCLEAN;
1879 if (*ptr == (unsigned long)-1)
1899 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1905 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1906 *out_level = (u8)key->offset;
1910 *ptr = (unsigned long)-1;
1923 for (eie = inode_list; eie; eie = eie->next) {
1926 extent_item_objectid, eie->inum,
1927 eie->offset, root);
1928 ret = iterate(eie->inum, eie->offset, root, ctx);
1943 * when the iterator function returns a non-zero value, iteration stops.
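A minimal callback matching this contract, with the parameter order (inum, offset, root, ctx) inferred from the iterate() invocation at line 1928; returning non-zero is what stops the walk:

    static int count_extent_inodes_cb(u64 inum, u64 offset, u64 root,
                                      void *ctx)
    {
            u64 *count = ctx;

            (*count)++;
            return 0;       /* non-zero would terminate the iteration */
    }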
1965 trans = btrfs_attach_transaction(fs_info->extent_root);
1967 if (PTR_ERR(trans) != -ENOENT &&
1968 PTR_ERR(trans) != -EROFS)
1977 down_read(&fs_info->commit_root_sem);
1987 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1996 root_node->val, ref_node->val,
1997 ref_node->aux);
2000 (uintptr_t)ref_node->aux,
2001 root_node->val,
2014 up_read(&fs_info->commit_root_sem);
2029 int search_commit_root = path->search_commit_root;
2036 return -EINVAL;
2038 extent_item_pos = logical - found_key.objectid;
2073 ret = found ? 0 : -ENOENT;
2079 slot = path->slots[0];
2080 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2082 ret = -ENOMEM;
2093 btrfs_debug(fs_root->fs_info,
2096 fs_root->root_key.objectid);
2133 ret = found ? 0 : -ENOENT;
2138 slot = path->slots[0];
2139 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2141 ret = -ENOMEM;
2157 (unsigned long)&extref->name, eb, ctx);
2184 else if (ret != -ENOENT)
2188 if (ret == -ENOENT && found_refs)
2196 * returns <0 in case of an error
2204 int i = ipath->fspath->elem_cnt;
2208 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2209 ipath->fspath->bytes_left - s_ptr : 0;
2211 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2212 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2218 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2219 ++ipath->fspath->elem_cnt;
2220 ipath->fspath->bytes_left = fspath - fspath_min;
2222 ++ipath->fspath->elem_missed;
2223 ipath->fspath->bytes_missing += fspath_min - fspath;
2224 ipath->fspath->bytes_left = 0;
2232 * it has been created large enough. each path is zero-terminated and accessed
2233 * from ipath->fspath->val[i].
2234 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2235 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2236 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2237 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2242 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
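Consuming the result is driven entirely by the elem_cnt/elem_missed/val[] fields described above; a minimal consumer sketch:

    u32 i;

    for (i = 0; i < ipath->fspath->elem_cnt; i++)
            pr_debug("path %u: %s\n", i,
                     (char *)(unsigned long)ipath->fspath->val[i]);
    if (ipath->fspath->elem_missed)
            pr_debug("%u paths did not fit\n", ipath->fspath->elem_missed);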
2254 return ERR_PTR(-ENOMEM);
2257 data->bytes_left = total_bytes - sizeof(*data);
2258 data->bytes_missing = 0;
2260 data->bytes_missing = sizeof(*data) - total_bytes;
2261 data->bytes_left = 0;
2264 data->elem_cnt = 0;
2265 data->elem_missed = 0;
2273 * information will be total_bytes - sizeof(struct inode_fs_paths).
2289 return ERR_PTR(-ENOMEM);
2292 ifp->btrfs_path = path;
2293 ifp->fspath = fspath;
2294 ifp->fs_root = fs_root;
2303 kvfree(ipath->fspath);
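Pairing the allocation and release helpers above gives the usual lifecycle; paths_from_inode() is assumed here as the entry point that ends in the iterate_irefs() call at line 2242:

    struct inode_fs_paths *ipath;
    int ret;

    ipath = init_ipath(4096, fs_root, path);        /* room for all paths */
    if (IS_ERR(ipath))
            return PTR_ERR(ipath);
    ret = paths_from_inode(inum, ipath);
    /* walk ipath->fspath->val[] as in the previous sketch */
    free_ipath(ipath);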
2316 ret->path = btrfs_alloc_path();
2317 if (!ret->path) {
2323 ret->path->search_commit_root = 1;
2324 ret->path->skip_locking = 1;
2325 ret->fs_info = fs_info;
2332 struct btrfs_fs_info *fs_info = iter->fs_info;
2333 struct btrfs_path *path = iter->path;
2340 key.offset = (u64)-1;
2341 iter->bytenr = bytenr;
2343 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2347 ret = -EUCLEAN;
2350 if (path->slots[0] == 0) {
2352 ret = -EUCLEAN;
2355 path->slots[0]--;
2357 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2360 ret = -ENOENT;
2363 memcpy(&iter->cur_key, &key, sizeof(key));
2364 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2365 path->slots[0]);
2366 iter->end_ptr = (u32)(iter->item_ptr +
2367 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2368 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2374 * This is an extra precaution for non skinny-metadata, where
2378 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2379 ret = -ENOTSUPP;
2382 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2385 if (iter->cur_ptr >= iter->end_ptr) {
2386 ret = btrfs_next_item(fs_info->extent_root, path);
2390 ret = -ENOENT;
2396 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2397 path->slots[0]);
2398 if (iter->cur_key.objectid != bytenr ||
2399 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2400 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2401 ret = -ENOENT;
2404 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2405 path->slots[0]);
2406 iter->item_ptr = iter->cur_ptr;
2407 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2408 path->nodes[0], path->slots[0]));
2421 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
2430 struct btrfs_path *path = iter->path;
2437 ASSERT(iter->cur_ptr < iter->end_ptr);
2447 ((unsigned long)iter->cur_ptr);
2452 iter->cur_ptr += size;
2453 if (iter->cur_ptr < iter->end_ptr)
2460 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2464 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2465 if (iter->cur_key.objectid != iter->bytenr ||
2466 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2467 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2469 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2470 path->slots[0]);
2471 iter->cur_ptr = iter->item_ptr;
2472 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2473 path->slots[0]);
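Putting the start/next fragments together, the intended driving loop looks roughly like this; btrfs_backref_iter_free() is assumed from the backref.h of the same era:

    struct btrfs_backref_iter *iter;
    int ret;

    iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
    if (!iter)
            return -ENOMEM;

    ret = btrfs_backref_iter_start(iter, bytenr);
    while (ret == 0) {
            /* inline vs keyed ref is told apart via iter->cur_key */
            ret = btrfs_backref_iter_next(iter);    /* >0: no more refs */
    }
    btrfs_backref_iter_free(iter);
    return ret < 0 ? ret : 0;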
2482 cache->rb_root = RB_ROOT;
2484 INIT_LIST_HEAD(&cache->pending[i]);
2485 INIT_LIST_HEAD(&cache->changed);
2486 INIT_LIST_HEAD(&cache->detached);
2487 INIT_LIST_HEAD(&cache->leaves);
2488 INIT_LIST_HEAD(&cache->pending_edge);
2489 INIT_LIST_HEAD(&cache->useless_node);
2490 cache->fs_info = fs_info;
2491 cache->is_reloc = is_reloc;
2504 INIT_LIST_HEAD(&node->list);
2505 INIT_LIST_HEAD(&node->upper);
2506 INIT_LIST_HEAD(&node->lower);
2507 RB_CLEAR_NODE(&node->rb_node);
2508 cache->nr_nodes++;
2509 node->level = level;
2510 node->bytenr = bytenr;
2522 cache->nr_edges++;
2542 BUG_ON(!node->lowest && !node->detached);
2543 while (!list_empty(&node->upper)) {
2544 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2545 list[LOWER]);
2546 upper = edge->node[UPPER];
2547 list_del(&edge->list[LOWER]);
2548 list_del(&edge->list[UPPER]);
2555 if (list_empty(&upper->lower)) {
2556 list_add_tail(&upper->lower, &cache->leaves);
2557 upper->lowest = 1;
2572 while (!list_empty(&cache->detached)) {
2573 node = list_entry(cache->detached.next,
2578 while (!list_empty(&cache->leaves)) {
2579 node = list_entry(cache->leaves.next,
2580 struct btrfs_backref_node, lower);
2584 cache->last_trans = 0;
2587 ASSERT(list_empty(&cache->pending[i]));
2588 ASSERT(list_empty(&cache->pending_edge));
2589 ASSERT(list_empty(&cache->useless_node));
2590 ASSERT(list_empty(&cache->changed));
2591 ASSERT(list_empty(&cache->detached));
2592 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2593 ASSERT(!cache->nr_nodes);
2594 ASSERT(!cache->nr_edges);
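The init fragments above and the release/ASSERT tail (btrfs_backref_release_cache() in mainline) bound the cache's whole lifecycle; a condensed, hedged sketch:

    struct btrfs_backref_cache cache;

    btrfs_backref_init_cache(fs_info, &cache, 1);   /* 1: relocation cache */
    /*
     * ... grow the graph with btrfs_backref_alloc_node() /
     * btrfs_backref_alloc_edge() and the add/finish helpers below ...
     */
    btrfs_backref_release_cache(&cache);
    /* pending/useless/changed/detached lists must all be empty again */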
2617 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2620 if (ref_key->objectid == ref_key->offset) {
2623 cur->is_reloc_root = 1;
2625 if (cache->is_reloc) {
2626 root = find_reloc_root(cache->fs_info, cur->bytenr);
2628 return -ENOENT;
2629 cur->root = root;
2635 list_add(&cur->list, &cache->useless_node);
2642 return -ENOMEM;
2644 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2647 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2648 cur->level + 1);
2651 return -ENOMEM;
2658 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2662 ASSERT(upper->checked);
2663 INIT_LIST_HEAD(&edge->list[UPPER]);
2687 struct btrfs_fs_info *fs_info = cache->fs_info;
2689 struct btrfs_backref_node *lower;
2698 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2701 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2702 cur->cowonly = 1;
2704 if (btrfs_root_level(&root->root_item) == cur->level) {
2706 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2714 * completely relying on direct backref (key->offset is parent
2717 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2719 list_add(&cur->list, &cache->useless_node);
2721 cur->root = root;
2726 level = cur->level + 1;
2729 path->search_commit_root = 1;
2730 path->skip_locking = 1;
2731 path->lowest_level = level;
2733 path->lowest_level = 0;
2738 if (ret > 0 && path->slots[level] > 0)
2739 path->slots[level]--;
2741 eb = path->nodes[level];
2742 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2745 cur->bytenr, level - 1, root->root_key.objectid,
2746 tree_key->objectid, tree_key->type, tree_key->offset);
2748 ret = -ENOENT;
2751 lower = cur;
2755 if (!path->nodes[level]) {
2756 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2757 lower->bytenr);
2760 cache->is_reloc) {
2762 list_add(&lower->list, &cache->useless_node);
2764 lower->root = root;
2772 ret = -ENOMEM;
2776 eb = path->nodes[level];
2777 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2779 upper = btrfs_backref_alloc_node(cache, eb->start,
2780 lower->level + 1);
2784 ret = -ENOMEM;
2787 upper->owner = btrfs_header_owner(eb);
2788 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2789 upper->cowonly = 1;
2796 upper->checked = 0;
2798 upper->checked = 1;
2805 if (!upper->checked && need_check) {
2807 list_add_tail(&edge->list[UPPER],
2808 &cache->pending_edge);
2810 if (upper->checked)
2812 INIT_LIST_HEAD(&edge->list[UPPER]);
2817 ASSERT(upper->checked);
2818 INIT_LIST_HEAD(&edge->list[UPPER]);
2819 if (!upper->owner)
2820 upper->owner = btrfs_header_owner(eb);
2822 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2828 lower = upper;
2840 * links aren't yet bi-directional; such links still need to be finished.
2853 struct btrfs_fs_info *fs_info = cache->fs_info;
2858 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2871 ret = -EUCLEAN;
2875 WARN_ON(cur->checked);
2876 if (!list_empty(&cur->upper)) {
2881 ASSERT(list_is_singular(&cur->upper));
2882 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2883 list[LOWER]);
2884 ASSERT(list_empty(&edge->list[UPPER]));
2885 exist = edge->node[UPPER];
2890 if (!exist->checked)
2891 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2904 key.objectid = iter->bytenr;
2910 ((unsigned long)iter->cur_ptr);
2914 ret = -EUCLEAN;
2920 key.type = iter->cur_key.type;
2921 key.offset = iter->cur_key.offset;
2930 exist->owner == key.offset) ||
2932 exist->bytenr == key.offset))) {
2944 ret = -EINVAL;
2963 cur->checked = 1;
2976 struct list_head *useless_node = &cache->useless_node;
2981 ASSERT(start->checked);
2983 /* Insert this node to cache if it's not COW-only */
2984 if (!start->cowonly) {
2985 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2986 &start->rb_node);
2988 btrfs_backref_panic(cache->fs_info, start->bytenr,
2989 -EEXIST);
2990 list_add_tail(&start->lower, &cache->leaves);
2998 list_for_each_entry(edge, &start->upper, list[LOWER])
2999 list_add_tail(&edge->list[UPPER], &pending_edge);
3003 struct btrfs_backref_node *lower;
3007 list_del_init(&edge->list[UPPER]);
3008 upper = edge->node[UPPER];
3009 lower = edge->node[LOWER];
3012 if (upper->detached) {
3013 list_del(&edge->list[LOWER]);
3016 /* Lower node is orphan, queue for cleanup */
3017 if (list_empty(&lower->upper))
3018 list_add(&lower->list, useless_node);
3025 * So if we have upper->rb_node populated, this means a cache
3029 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3030 if (upper->lowest) {
3031 list_del_init(&upper->lower);
3032 upper->lowest = 0;
3035 list_add_tail(&edge->list[UPPER], &upper->lower);
3040 if (!upper->checked) {
3042 return -EUCLEAN;
3045 /* Sanity check, COW-only node has non-COW-only parent */
3046 if (start->cowonly != upper->cowonly) {
3048 return -EUCLEAN;
3051 /* Only cache non-COW-only (subvolume trees) tree blocks */
3052 if (!upper->cowonly) {
3053 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3054 &upper->rb_node);
3056 btrfs_backref_panic(cache->fs_info,
3057 upper->bytenr, -EEXIST);
3058 return -EUCLEAN;
3062 list_add_tail(&edge->list[UPPER], &upper->lower);
3068 list_for_each_entry(edge, &upper->upper, list[LOWER])
3069 list_add_tail(&edge->list[UPPER], &pending_edge);
3077 struct btrfs_backref_node *lower;
3081 while (!list_empty(&cache->useless_node)) {
3082 lower = list_first_entry(&cache->useless_node,
3084 list_del_init(&lower->list);
3086 while (!list_empty(&cache->pending_edge)) {
3087 edge = list_first_entry(&cache->pending_edge,
3089 list_del(&edge->list[UPPER]);
3090 list_del(&edge->list[LOWER]);
3091 lower = edge->node[LOWER];
3092 upper = edge->node[UPPER];
3096 * Lower is no longer linked to any upper backref nodes and
3099 if (list_empty(&lower->upper) &&
3100 RB_EMPTY_NODE(&lower->rb_node))
3101 list_add(&lower->list, &cache->useless_node);
3103 if (!RB_EMPTY_NODE(&upper->rb_node))
3107 list_for_each_entry(edge, &upper->upper, list[LOWER])
3108 list_add_tail(&edge->list[UPPER],
3109 &cache->pending_edge);
3110 if (list_empty(&upper->upper))
3111 list_add(&upper->list, &cache->useless_node);
3114 while (!list_empty(&cache->useless_node)) {
3115 lower = list_first_entry(&cache->useless_node,
3117 list_del_init(&lower->list);
3118 if (lower == node)
3120 btrfs_backref_drop_node(cache, lower);
3124 ASSERT(list_empty(&cache->useless_node) &&
3125 list_empty(&cache->pending_edge));