Lines Matching full:path

22 		      *root, struct btrfs_path *path, int level);
24 const struct btrfs_key *ins_key, struct btrfs_path *path,
32 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
84 /* this also releases the path */
94 * path release drops references on the extent buffers in the path
95 * and it drops any locks held by this path
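For orientation, this is the path lifecycle those comments describe, as a minimal sketch (kernel-internal API; the example_lookup() wrapper and the inode-item key are made up for illustration):

	/* Sketch: allocate a path, search read-only, then drop refs and locks. */
	static int example_lookup(struct btrfs_root *root, u64 ino)
	{
		struct btrfs_path *path;
		struct btrfs_key key = { .objectid = ino,
					 .type = BTRFS_INODE_ITEM_KEY,
					 .offset = 0 };
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		/* Read-only search: no transaction, ins_len = 0, cow = 0. */
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		/*
		 * Releasing the path drops the extent buffer references and any
		 * locks it holds; btrfs_free_path() releases and then frees it.
		 */
		btrfs_free_path(path);
		return ret;
	}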
883 struct btrfs_path *path, int level) in balance_level() argument
893 int orig_slot = path->slots[level]; in balance_level()
898 mid = path->nodes[level]; in balance_level()
900 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK); in balance_level()
906 parent = path->nodes[level + 1]; in balance_level()
907 pslot = path->slots[level + 1]; in balance_level()
944 path->locks[level] = 0; in balance_level()
945 path->nodes[level] = NULL; in balance_level()
948 /* once for the path */ in balance_level()
1009 del_ptr(root, path, level + 1, pslot + 1); in balance_level()
1055 del_ptr(root, path, level + 1, pslot); in balance_level()
1071 /* update the path */ in balance_level()
1076 path->nodes[level] = left; in balance_level()
1077 path->slots[level + 1] -= 1; in balance_level()
1078 path->slots[level] = orig_slot; in balance_level()
1085 path->slots[level] = orig_slot; in balance_level()
1090 btrfs_node_blockptr(path->nodes[level], path->slots[level])) in balance_level()
1098 if (path->nodes[level] != left) in balance_level()
1111 struct btrfs_path *path, int level) in push_nodes_for_insert() argument
1121 int orig_slot = path->slots[level]; in push_nodes_for_insert()
1126 mid = path->nodes[level]; in push_nodes_for_insert()
1130 parent = path->nodes[level + 1]; in push_nodes_for_insert()
1131 pslot = path->slots[level + 1]; in push_nodes_for_insert()
1172 path->nodes[level] = left; in push_nodes_for_insert()
1173 path->slots[level + 1] -= 1; in push_nodes_for_insert()
1174 path->slots[level] = orig_slot; in push_nodes_for_insert()
1180 path->slots[level] = orig_slot; in push_nodes_for_insert()
1227 path->nodes[level] = right; in push_nodes_for_insert()
1228 path->slots[level + 1] += 1; in push_nodes_for_insert()
1229 path->slots[level] = orig_slot - in push_nodes_for_insert()
1250 struct btrfs_path *path, in reada_for_search() argument
1264 if (level != 1 && path->reada != READA_FORWARD_ALWAYS) in reada_for_search()
1267 if (!path->nodes[level]) in reada_for_search()
1270 node = path->nodes[level]; in reada_for_search()
1277 if (path->reada == READA_FORWARD_ALWAYS) { in reada_for_search()
1288 if (path->reada != READA_FORWARD_ALWAYS) { in reada_for_search()
1304 if (path->reada == READA_BACK) { in reada_for_search()
1308 } else if (path->reada == READA_FORWARD || in reada_for_search()
1309 path->reada == READA_FORWARD_ALWAYS) { in reada_for_search()
1314 if (path->reada == READA_BACK && objectid) { in reada_for_search()
1320 if (path->reada == READA_FORWARD_ALWAYS || in reada_for_search()
1332 static noinline void reada_for_balance(struct btrfs_path *path, int level) in reada_for_balance() argument
1338 parent = path->nodes[level + 1]; in reada_for_balance()
1343 slot = path->slots[level + 1]; in reada_for_balance()
1354 * in the tree. The exceptions are when our path goes through slot 0, because
1358 * callers might also have set path->keep_locks, which tells this code to keep
1359 * the lock if the path points to the last slot in the block. This is part of
1365 static noinline void unlock_up(struct btrfs_path *path, int level, in unlock_up() argument
1374 if (!path->nodes[i]) in unlock_up()
1376 if (!path->locks[i]) in unlock_up()
1380 if (path->slots[i] == 0) { in unlock_up()
1385 if (path->keep_locks) { in unlock_up()
1388 nritems = btrfs_header_nritems(path->nodes[i]); in unlock_up()
1389 if (nritems < 1 || path->slots[i] >= nritems - 1) { in unlock_up()
1398 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); in unlock_up()
1399 path->locks[i] = 0; in unlock_up()
1415 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
1537 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1586 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, in btrfs_find_item() argument
1594 ASSERT(path); in btrfs_find_item()
1601 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); in btrfs_find_item()
1605 eb = path->nodes[0]; in btrfs_find_item()
1606 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { in btrfs_find_item()
1607 ret = btrfs_next_leaf(fs_root, path); in btrfs_find_item()
1610 eb = path->nodes[0]; in btrfs_find_item()
1613 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); in btrfs_find_item()
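A hedged sketch of a btrfs_find_item() caller, matching the prototype shown above (the example_find_inode_item() wrapper is illustrative; the caller still owns and must release the path):

	static int example_find_inode_item(struct btrfs_root *fs_root,
					   struct btrfs_path *path, u64 ino)
	{
		struct btrfs_key found_key;
		int ret;

		ret = btrfs_find_item(fs_root, path, ino, 0 /* ioff */,
				      BTRFS_INODE_ITEM_KEY, &found_key);
		if (ret > 0)
			return -ENOENT;	/* no item with that objectid/type */
		if (ret < 0)
			return ret;	/* search error */

		/* found_key matches the item at path->nodes[0], path->slots[0]. */
		return 0;
	}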
1704 * Replace the extent buffer at the lowest level of the path with a cloned
1715 static int finish_need_commit_sem_search(struct btrfs_path *path) in finish_need_commit_sem_search() argument
1717 const int i = path->lowest_level; in finish_need_commit_sem_search()
1718 const int slot = path->slots[i]; in finish_need_commit_sem_search()
1719 struct extent_buffer *lowest = path->nodes[i]; in finish_need_commit_sem_search()
1722 ASSERT(path->need_commit_sem); in finish_need_commit_sem_search()
1733 btrfs_release_path(path); in finish_need_commit_sem_search()
1734 path->nodes[i] = clone; in finish_need_commit_sem_search()
1735 path->slots[i] = slot; in finish_need_commit_sem_search()
1764 struct btrfs_path *path, in search_leaf() argument
1768 struct extent_buffer *leaf = path->nodes[0]; in search_leaf()
1789 * !path->locks[1] means we have a single node tree, the leaf is in search_leaf()
1792 if (path->locks[1] && leaf_free_space >= ins_len) { in search_leaf()
1819 btrfs_unlock_up_safe(path, 1); in search_leaf()
1837 btrfs_unlock_up_safe(path, 1); in search_leaf()
1844 path->slots[0] = 0; in search_leaf()
1851 prev_cmp, &path->slots[0]); in search_leaf()
1866 if (ret == 0 && !path->search_for_extension) { in search_leaf()
1876 err = split_leaf(trans, root, key, path, ins_len, in search_leaf()
1894 * @p: Holds all btree nodes along the search path
1912 * of the path (level 0)
1914 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2001 * then we don't want to set the path blocking, in btrfs_search_slot()
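The return convention of btrfs_search_slot() (0 = exact match, 1 = not found with the path at the would-be insert position, < 0 = error) is usually consumed like this; a hedged sketch, with the file-extent key chosen only as an example:

	static int example_lookup_file_extent(struct btrfs_root *root, u64 ino,
					      u64 file_offset)
	{
		struct btrfs_path *path;
		struct btrfs_key key = { .objectid = ino,
					 .type = BTRFS_EXTENT_DATA_KEY,
					 .offset = file_offset };
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;	/* I/O or memory error */
		if (ret > 0) {
			/*
			 * Not an exact match: path->slots[0] is the insert
			 * position, so step back one slot to reach the extent
			 * that starts before @file_offset, if there is one.
			 */
			if (path->slots[0] == 0) {
				ret = -ENOENT;
				goto out;
			}
			path->slots[0]--;
			ret = 0;
		}
		/* path->nodes[0] / path->slots[0] now address the item. */
	out:
		btrfs_free_path(path);
		return ret;
	}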
2166 * The resulting path and return value will be set up as if we called
2287 * a return value of 1 means the path is at the position where the in btrfs_search_slot_for_read()
2289 * but in case the previous item is the last in a leaf, path points in btrfs_search_slot_for_read()
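btrfs_search_slot_for_read() wraps that dance for read-only callers that accept a neighbouring item; a hedged usage sketch (the wrapper name is illustrative, find_higher/return_any as in the prototype):

	/* Return, via *key, the first key at or after *key in @root. */
	static int example_first_key_at_or_after(struct btrfs_root *root,
						 struct btrfs_key *key)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot_for_read(root, key, path,
						 1 /* find_higher */,
						 0 /* return_any */);
		if (ret == 0)
			btrfs_item_key_to_cpu(path->nodes[0], key,
					      path->slots[0]);
		else if (ret > 0)
			ret = -ENOENT;	/* nothing at or after *key */

		btrfs_free_path(path);
		return ret;
	}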
2346 struct btrfs_path *path) in btrfs_search_backwards() argument
2350 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); in btrfs_search_backwards()
2352 ret = btrfs_previous_item(root, path, key->objectid, key->type); in btrfs_search_backwards()
2355 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); in btrfs_search_backwards()
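btrfs_search_backwards() bundles the two calls above (a search, then a step to the previous item of the same objectid/type) and rewrites the key in place on success; a hedged sketch, with the dev-extent key purely as an example:

	static int example_prev_dev_extent(struct btrfs_root *root, u64 devid,
					   u64 offset, struct btrfs_key *found)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		found->objectid = devid;
		found->type = BTRFS_DEV_EXTENT_KEY;
		found->offset = offset;

		/* 0: *found now holds the key that was found; 1: none; <0: error. */
		ret = btrfs_search_backwards(root, found, path);

		btrfs_free_path(path);
		return ret;
	}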
2361 * Search for a valid slot for the given path.
2365 * @path: The starting point to validate the slot.
2372 struct btrfs_path *path) in btrfs_get_next_valid_item() argument
2376 const int slot = path->slots[0]; in btrfs_get_next_valid_item()
2377 const struct extent_buffer *leaf = path->nodes[0]; in btrfs_get_next_valid_item()
2379 /* This is where we start walking the path. */ in btrfs_get_next_valid_item()
2383 * to go to the next leaf and reset the path. in btrfs_get_next_valid_item()
2385 ret = btrfs_next_leaf(root, path); in btrfs_get_next_valid_item()
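A hedged sketch of the iteration loop btrfs_get_next_valid_item() is meant for (the wrapper name and the per-objectid filter are illustrative):

	static int example_walk_items(struct btrfs_root *root, u64 objectid)
	{
		struct btrfs_path *path;
		struct btrfs_key key = { .objectid = objectid,
					 .type = 0, .offset = 0 };
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		while (1) {
			/* Crosses leaf boundaries via btrfs_next_leaf(). */
			ret = btrfs_get_next_valid_item(root, &key, path);
			if (ret)	/* 1: no more items, <0: error */
				break;
			if (key.objectid != objectid)
				break;

			/* ... process path->nodes[0] / path->slots[0] ... */

			path->slots[0]++;
		}
		if (ret == 1)
			ret = 0;
	out:
		btrfs_free_path(path);
		return ret;
	}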
2405 static void fixup_low_keys(struct btrfs_path *path, in fixup_low_keys() argument
2413 int tslot = path->slots[i]; in fixup_low_keys()
2415 if (!path->nodes[i]) in fixup_low_keys()
2417 t = path->nodes[i]; in fixup_low_keys()
2422 btrfs_mark_buffer_dirty(path->nodes[i]); in fixup_low_keys()
2435 struct btrfs_path *path, in btrfs_set_item_key_safe() argument
2442 eb = path->nodes[0]; in btrfs_set_item_key_safe()
2443 slot = path->slots[0]; in btrfs_set_item_key_safe()
2477 fixup_low_keys(path, &disk_key, 1); in btrfs_set_item_key_safe()
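A sketch of the caller side of btrfs_set_item_key_safe(); in this version the first argument is assumed to be the fs_info, and the offset bump is only an example. The new key must still sort between the item's neighbours, which is what the function verifies:

	static void example_bump_item_offset(struct btrfs_fs_info *fs_info,
					     struct btrfs_path *path, u64 delta)
	{
		struct btrfs_key new_key;

		btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
		new_key.offset += delta;	/* must stay below the next key */
		btrfs_set_item_key_safe(fs_info, path, &new_key);
	}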
2692 struct btrfs_path *path, int level) in insert_new_root() argument
2702 BUG_ON(path->nodes[level]); in insert_new_root()
2703 BUG_ON(path->nodes[level-1] != root->node); in insert_new_root()
2705 lower = path->nodes[level-1]; in insert_new_root()
2739 path->nodes[level] = c; in insert_new_root()
2740 path->locks[level] = BTRFS_WRITE_LOCK; in insert_new_root()
2741 path->slots[level] = 0; in insert_new_root()
2753 struct btrfs_path *path, in insert_ptr() argument
2761 BUG_ON(!path->nodes[level]); in insert_ptr()
2762 btrfs_assert_tree_write_locked(path->nodes[level]); in insert_ptr()
2763 lower = path->nodes[level]; in insert_ptr()
2792 * split the node at the specified level in path in two.
2793 * The path is corrected to point to the appropriate node after the split
2802 struct btrfs_path *path, int level) in split_node() argument
2812 c = path->nodes[level]; in split_node()
2825 ret = insert_new_root(trans, root, path, level + 1); in split_node()
2829 ret = push_nodes_for_insert(trans, root, path, level); in split_node()
2830 c = path->nodes[level]; in split_node()
2866 insert_ptr(trans, path, &disk_key, split->start, in split_node()
2867 path->slots[level + 1] + 1, level + 1); in split_node()
2869 if (path->slots[level] >= mid) { in split_node()
2870 path->slots[level] -= mid; in split_node()
2873 path->nodes[level] = split; in split_node()
2874 path->slots[level + 1] += 1; in split_node()
2928 static noinline int __push_leaf_right(struct btrfs_path *path, in __push_leaf_right() argument
2935 struct extent_buffer *left = path->nodes[0]; in __push_leaf_right()
2936 struct extent_buffer *upper = path->nodes[1]; in __push_leaf_right()
2953 if (path->slots[0] >= left_nritems) in __push_leaf_right()
2956 slot = path->slots[1]; in __push_leaf_right()
2960 if (path->slots[0] > i) in __push_leaf_right()
2962 if (path->slots[0] == i) { in __push_leaf_right()
2970 if (path->slots[0] == i) in __push_leaf_right()
3042 /* then fixup the leaf pointer in the path */ in __push_leaf_right()
3043 if (path->slots[0] >= left_nritems) { in __push_leaf_right()
3044 path->slots[0] -= left_nritems; in __push_leaf_right()
3045 if (btrfs_header_nritems(path->nodes[0]) == 0) in __push_leaf_right()
3046 btrfs_clean_tree_block(path->nodes[0]); in __push_leaf_right()
3047 btrfs_tree_unlock(path->nodes[0]); in __push_leaf_right()
3048 free_extent_buffer(path->nodes[0]); in __push_leaf_right()
3049 path->nodes[0] = right; in __push_leaf_right()
3050 path->slots[1] += 1; in __push_leaf_right()
3064 * push some data in the path leaf to the right, trying to free up at
3074 *root, struct btrfs_path *path, in push_leaf_right() argument
3078 struct extent_buffer *left = path->nodes[0]; in push_leaf_right()
3086 if (!path->nodes[1]) in push_leaf_right()
3089 slot = path->slots[1]; in push_leaf_right()
3090 upper = path->nodes[1]; in push_leaf_right()
3094 btrfs_assert_tree_write_locked(path->nodes[1]); in push_leaf_right()
3125 if (path->slots[0] == left_nritems && !empty) { in push_leaf_right()
3132 path->nodes[0] = right; in push_leaf_right()
3133 path->slots[0] = 0; in push_leaf_right()
3134 path->slots[1]++; in push_leaf_right()
3138 return __push_leaf_right(path, min_data_size, empty, in push_leaf_right()
3147 * push some data in the path leaf to the left, trying to free up at
3154 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, in __push_leaf_left() argument
3161 struct extent_buffer *right = path->nodes[0]; in __push_leaf_left()
3179 if (path->slots[0] < i) in __push_leaf_left()
3181 if (path->slots[0] == i) { in __push_leaf_left()
3189 if (path->slots[0] == i) in __push_leaf_left()
3270 fixup_low_keys(path, &disk_key, 1); in __push_leaf_left()
3272 /* then fixup the leaf pointer in the path */ in __push_leaf_left()
3273 if (path->slots[0] < push_items) { in __push_leaf_left()
3274 path->slots[0] += old_left_nritems; in __push_leaf_left()
3275 btrfs_tree_unlock(path->nodes[0]); in __push_leaf_left()
3276 free_extent_buffer(path->nodes[0]); in __push_leaf_left()
3277 path->nodes[0] = left; in __push_leaf_left()
3278 path->slots[1] -= 1; in __push_leaf_left()
3282 path->slots[0] -= push_items; in __push_leaf_left()
3284 BUG_ON(path->slots[0] < 0); in __push_leaf_left()
3293 * push some data in the path leaf to the left, trying to free up at
3301 *root, struct btrfs_path *path, int min_data_size, in push_leaf_left() argument
3304 struct extent_buffer *right = path->nodes[0]; in push_leaf_left()
3311 slot = path->slots[1]; in push_leaf_left()
3314 if (!path->nodes[1]) in push_leaf_left()
3321 btrfs_assert_tree_write_locked(path->nodes[1]); in push_leaf_left()
3323 left = btrfs_read_node_slot(path->nodes[1], slot - 1); in push_leaf_left()
3340 path->nodes[1], slot - 1, &left, in push_leaf_left()
3353 return __push_leaf_left(path, min_data_size, in push_leaf_left()
3363 * split the path's leaf in two, making sure there is at least data_size
3364 * available for the resulting leaf level of the path.
3367 struct btrfs_path *path, in copy_for_split() argument
3404 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); in copy_for_split()
3408 BUG_ON(path->slots[0] != slot); in copy_for_split()
3411 btrfs_tree_unlock(path->nodes[0]); in copy_for_split()
3412 free_extent_buffer(path->nodes[0]); in copy_for_split()
3413 path->nodes[0] = right; in copy_for_split()
3414 path->slots[0] -= mid; in copy_for_split()
3415 path->slots[1] += 1; in copy_for_split()
3421 BUG_ON(path->slots[0] < 0); in copy_for_split()
3436 struct btrfs_path *path, in push_for_double_split() argument
3445 slot = path->slots[0]; in push_for_double_split()
3446 if (slot < btrfs_header_nritems(path->nodes[0])) in push_for_double_split()
3447 space_needed -= btrfs_leaf_free_space(path->nodes[0]); in push_for_double_split()
3453 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); in push_for_double_split()
3460 nritems = btrfs_header_nritems(path->nodes[0]); in push_for_double_split()
3465 if (path->slots[0] == 0 || path->slots[0] == nritems) in push_for_double_split()
3468 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) in push_for_double_split()
3472 slot = path->slots[0]; in push_for_double_split()
3475 space_needed -= btrfs_leaf_free_space(path->nodes[0]); in push_for_double_split()
3476 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); in push_for_double_split()
3489 * split the path's leaf in two, making sure there is at least data_size
3490 * available for the resulting leaf level of the path.
3497 struct btrfs_path *path, int data_size, in split_leaf() argument
3513 l = path->nodes[0]; in split_leaf()
3514 slot = path->slots[0]; in split_leaf()
3520 if (data_size && path->nodes[1]) { in split_leaf()
3526 wret = push_leaf_right(trans, root, path, space_needed, in split_leaf()
3534 wret = push_leaf_left(trans, root, path, space_needed, in split_leaf()
3539 l = path->nodes[0]; in split_leaf()
3546 if (!path->nodes[1]) { in split_leaf()
3547 ret = insert_new_root(trans, root, path, 1); in split_leaf()
3553 l = path->nodes[0]; in split_leaf()
3554 slot = path->slots[0]; in split_leaf()
3620 insert_ptr(trans, path, &disk_key, in split_leaf()
3621 right->start, path->slots[1] + 1, 1); in split_leaf()
3622 btrfs_tree_unlock(path->nodes[0]); in split_leaf()
3623 free_extent_buffer(path->nodes[0]); in split_leaf()
3624 path->nodes[0] = right; in split_leaf()
3625 path->slots[0] = 0; in split_leaf()
3626 path->slots[1] += 1; in split_leaf()
3629 insert_ptr(trans, path, &disk_key, in split_leaf()
3630 right->start, path->slots[1], 1); in split_leaf()
3631 btrfs_tree_unlock(path->nodes[0]); in split_leaf()
3632 free_extent_buffer(path->nodes[0]); in split_leaf()
3633 path->nodes[0] = right; in split_leaf()
3634 path->slots[0] = 0; in split_leaf()
3635 if (path->slots[1] == 0) in split_leaf()
3636 fixup_low_keys(path, &disk_key, 1); in split_leaf()
3646 copy_for_split(trans, path, l, right, slot, mid, nritems); in split_leaf()
3657 push_for_double_split(trans, root, path, data_size); in split_leaf()
3659 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) in split_leaf()
3666 struct btrfs_path *path, int ins_len) in setup_leaf_for_split() argument
3675 leaf = path->nodes[0]; in setup_leaf_for_split()
3676 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in setup_leaf_for_split()
3684 item_size = btrfs_item_size(leaf, path->slots[0]); in setup_leaf_for_split()
3686 fi = btrfs_item_ptr(leaf, path->slots[0], in setup_leaf_for_split()
3690 btrfs_release_path(path); in setup_leaf_for_split()
3692 path->keep_locks = 1; in setup_leaf_for_split()
3693 path->search_for_split = 1; in setup_leaf_for_split()
3694 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); in setup_leaf_for_split()
3695 path->search_for_split = 0; in setup_leaf_for_split()
3702 leaf = path->nodes[0]; in setup_leaf_for_split()
3704 if (item_size != btrfs_item_size(leaf, path->slots[0])) in setup_leaf_for_split()
3708 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) in setup_leaf_for_split()
3712 fi = btrfs_item_ptr(leaf, path->slots[0], in setup_leaf_for_split()
3718 ret = split_leaf(trans, root, &key, path, ins_len, 1); in setup_leaf_for_split()
3722 path->keep_locks = 0; in setup_leaf_for_split()
3723 btrfs_unlock_up_safe(path, 1); in setup_leaf_for_split()
3726 path->keep_locks = 0; in setup_leaf_for_split()
3730 static noinline int split_item(struct btrfs_path *path, in split_item() argument
3742 leaf = path->nodes[0]; in split_item()
3745 orig_slot = path->slots[0]; in split_item()
3746 orig_offset = btrfs_item_offset(leaf, path->slots[0]); in split_item()
3747 item_size = btrfs_item_size(leaf, path->slots[0]); in split_item()
3754 path->slots[0]), item_size); in split_item()
3756 slot = path->slots[0] + 1; in split_item()
3779 btrfs_item_ptr_offset(leaf, path->slots[0]), in split_item()
3798 * The path may be released by this operation. After
3799 * the split, the path is pointing to the old item. The
3810 struct btrfs_path *path, in btrfs_split_item() argument
3815 ret = setup_leaf_for_split(trans, root, path, in btrfs_split_item()
3820 ret = split_item(path, new_key, split_offset); in btrfs_split_item()
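A hedged sketch of a btrfs_split_item() call: the path must point at the item, @new_key names the second half, and @split_offset is the byte offset inside the item where it is cut. Per the comment above, the path keeps pointing at the old (now shortened) item afterwards. The wrapper and search parameters are illustrative; note that in this file the helper is only expected on a few item types (see setup_leaf_for_split()):

	static int example_split_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      const struct btrfs_key *key,
				      const struct btrfs_key *new_key,
				      unsigned long split_offset)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot(trans, root, key, path, 0, 1 /* cow */);
		if (ret > 0)
			ret = -ENOENT;
		if (ret == 0)
			ret = btrfs_split_item(trans, root, path, new_key,
					       split_offset);

		btrfs_free_path(path);
		return ret;
	}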
3825 * make the item pointed to by the path smaller. new_size indicates
3830 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) in btrfs_truncate_item() argument
3842 leaf = path->nodes[0]; in btrfs_truncate_item()
3843 slot = path->slots[0]; in btrfs_truncate_item()
3908 fixup_low_keys(path, &disk_key, 1); in btrfs_truncate_item()
3921 * make the item pointed to by the path bigger, data_size is the added size.
3923 void btrfs_extend_item(struct btrfs_path *path, u32 data_size) in btrfs_extend_item() argument
3934 leaf = path->nodes[0]; in btrfs_extend_item()
3943 slot = path->slots[0]; in btrfs_extend_item()
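Both helpers resize the item the path already points at, so a caller that grows an item has to reserve the extra leaf space during the search. A hedged sketch of the grow side (wrapper name illustrative; path->search_for_extension marks ins_len as growth of an existing item rather than a new insertion, as in the search code above):

	static int example_grow_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     const struct btrfs_key *key, u32 add)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		path->search_for_extension = 1;
		/* ins_len = add reserves leaf space; cow = 1 to allow changes. */
		ret = btrfs_search_slot(trans, root, key, path, add, 1);
		path->search_for_extension = 0;
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			goto out;

		/* Grow the item by @add bytes; the caller fills the new space. */
		btrfs_extend_item(path, add);
		btrfs_mark_buffer_dirty(path->nodes[0]);
	out:
		btrfs_free_path(path);
		return ret;
	}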
3988 * @path: points to the leaf/slot where we are going to insert new items
3991 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, in setup_items_for_insert() argument
4009 if (path->slots[0] == 0) { in setup_items_for_insert()
4011 fixup_low_keys(path, &disk_key, 1); in setup_items_for_insert()
4013 btrfs_unlock_up_safe(path, 1); in setup_items_for_insert()
4015 leaf = path->nodes[0]; in setup_items_for_insert()
4016 slot = path->slots[0]; in setup_items_for_insert()
4086 * @path: A path pointing to the target leaf and slot.
4091 struct btrfs_path *path, in btrfs_setup_item_for_insert() argument
4102 setup_items_for_insert(root, path, &batch); in btrfs_setup_item_for_insert()
4107 * This does all the path init required, making room in the tree if needed.
4111 struct btrfs_path *path, in btrfs_insert_empty_items() argument
4119 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); in btrfs_insert_empty_items()
4125 slot = path->slots[0]; in btrfs_insert_empty_items()
4128 setup_items_for_insert(root, path, batch); in btrfs_insert_empty_items()
4134 * This does all the path init required, making room in the tree if needed.
4141 struct btrfs_path *path; in btrfs_insert_item() local
4145 path = btrfs_alloc_path(); in btrfs_insert_item()
4146 if (!path) in btrfs_insert_item()
4148 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); in btrfs_insert_item()
4150 leaf = path->nodes[0]; in btrfs_insert_item()
4151 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_insert_item()
4155 btrfs_free_path(path); in btrfs_insert_item()
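Those matches are the canonical insert pattern; reassembled as a hedged, self-contained sketch (the wrapper name is illustrative, the body mirrors btrfs_insert_item() above):

	static int example_insert(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  const struct btrfs_key *key,
				  const void *data, u32 data_size)
	{
		struct btrfs_path *path;
		struct extent_buffer *leaf;
		unsigned long ptr;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		/* Reserves leaf space and leaves the path at the new, empty item. */
		ret = btrfs_insert_empty_item(trans, root, path, key, data_size);
		if (ret == 0) {
			leaf = path->nodes[0];
			ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
			write_extent_buffer(leaf, data, ptr, data_size);
			btrfs_mark_buffer_dirty(leaf);
		}
		btrfs_free_path(path);
		return ret;
	}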
4169 struct btrfs_path *path, in btrfs_duplicate_item() argument
4176 leaf = path->nodes[0]; in btrfs_duplicate_item()
4177 item_size = btrfs_item_size(leaf, path->slots[0]); in btrfs_duplicate_item()
4178 ret = setup_leaf_for_split(trans, root, path, in btrfs_duplicate_item()
4183 path->slots[0]++; in btrfs_duplicate_item()
4184 btrfs_setup_item_for_insert(root, path, new_key, item_size); in btrfs_duplicate_item()
4185 leaf = path->nodes[0]; in btrfs_duplicate_item()
4187 btrfs_item_ptr_offset(leaf, path->slots[0]), in btrfs_duplicate_item()
4188 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), in btrfs_duplicate_item()
4199 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, in del_ptr() argument
4202 struct extent_buffer *parent = path->nodes[level]; in del_ptr()
4234 fixup_low_keys(path, &disk_key, level + 1); in del_ptr()
4240 * a helper function to delete the leaf pointed to by path->slots[1] and
4241 * path->nodes[1].
4243 * This deletes the pointer in path->nodes[1] and frees the leaf
4246 * The path must have already been setup for deleting the leaf, including
4247 * all the proper balancing. path->nodes[1] must be locked.
4251 struct btrfs_path *path, in btrfs_del_leaf() argument
4255 del_ptr(root, path, 1, path->slots[1]); in btrfs_del_leaf()
4261 btrfs_unlock_up_safe(path, 0); in btrfs_del_leaf()
4270 * delete the item at the leaf level in path. If that empties
4274 struct btrfs_path *path, int slot, int nr) in btrfs_del_items() argument
4282 leaf = path->nodes[0]; in btrfs_del_items()
4322 btrfs_del_leaf(trans, root, path, leaf); in btrfs_del_items()
4330 fixup_low_keys(path, &disk_key, 1); in btrfs_del_items()
4344 /* push_leaf_left fixes the path. in btrfs_del_items()
4345 * make sure the path still points to our leaf in btrfs_del_items()
4348 slot = path->slots[1]; in btrfs_del_items()
4356 wret = push_leaf_left(trans, root, path, 0, in btrfs_del_items()
4361 if (path->nodes[0] == leaf && in btrfs_del_items()
4375 wret = push_leaf_right(trans, root, path, 0, in btrfs_del_items()
4382 path->slots[1] = slot; in btrfs_del_items()
4383 btrfs_del_leaf(trans, root, path, leaf); in btrfs_del_items()
4387 /* if we're still in the path, make sure in btrfs_del_items()
4392 if (path->nodes[0] == leaf) in btrfs_del_items()
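The delete side, as a hedged sketch: search with cow enabled and ins_len = -1 (the conventional signal that the search is for a deletion, so the nodes get pre-balanced), then hand the slot to btrfs_del_items(). The wrapper is illustrative:

	static int example_delete_item(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       const struct btrfs_key *key)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot(trans, root, key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
		if (ret == 0)
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], 1);

		btrfs_free_path(path);
		return ret;
	}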
4408 * This may release the path, and so you may lose any locks held at the
4411 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) in btrfs_prev_leaf() argument
4417 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); in btrfs_prev_leaf()
4432 btrfs_release_path(path); in btrfs_prev_leaf()
4433 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); in btrfs_prev_leaf()
4436 btrfs_item_key(path->nodes[0], &found_key, 0); in btrfs_prev_leaf()
4440 * before we released our path. And after we released our path, that in btrfs_prev_leaf()
4460 * key and get a writable path.
4462 * This honors path->lowest_level to prevent descent past a given level
4473 struct btrfs_path *path, in btrfs_search_forward() argument
4483 int keep_locks = path->keep_locks; in btrfs_search_forward()
4485 ASSERT(!path->nowait); in btrfs_search_forward()
4486 path->keep_locks = 1; in btrfs_search_forward()
4490 WARN_ON(path->nodes[level]); in btrfs_search_forward()
4491 path->nodes[level] = cur; in btrfs_search_forward()
4492 path->locks[level] = BTRFS_READ_LOCK; in btrfs_search_forward()
4507 /* at the lowest level, we're done, setup the path and exit */ in btrfs_search_forward()
4508 if (level == path->lowest_level) { in btrfs_search_forward()
4512 path->slots[level] = slot; in btrfs_search_forward()
4538 path->slots[level] = slot; in btrfs_search_forward()
4539 sret = btrfs_find_next_key(root, path, min_key, level, in btrfs_search_forward()
4542 btrfs_release_path(path); in btrfs_search_forward()
4550 path->slots[level] = slot; in btrfs_search_forward()
4551 if (level == path->lowest_level) { in btrfs_search_forward()
4563 path->locks[level - 1] = BTRFS_READ_LOCK; in btrfs_search_forward()
4564 path->nodes[level - 1] = cur; in btrfs_search_forward()
4565 unlock_up(path, level, 1, 0, NULL); in btrfs_search_forward()
4568 path->keep_locks = keep_locks; in btrfs_search_forward()
4570 btrfs_unlock_up_safe(path, path->lowest_level + 1); in btrfs_search_forward()
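btrfs_search_forward() is typically driven in a loop that scans for items changed in or after a given transid; a hedged sketch (wrapper name illustrative, the key-advance logic written out by hand), noting that the function rewrites the key with the one it found:

	static int example_scan_newer(struct btrfs_root *root, u64 min_trans)
	{
		struct btrfs_path *path;
		struct btrfs_key key = { 0 };	/* start at the smallest key */
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		while (1) {
			ret = btrfs_search_forward(root, &key, path, min_trans);
			if (ret)	/* 1: no more matches, <0: error */
				break;

			/* ... process path->nodes[0] / path->slots[0] ... */

			btrfs_release_path(path);

			/* Advance past the key that was just processed. */
			if (key.offset < (u64)-1) {
				key.offset++;
			} else if (key.type < (u8)-1) {
				key.offset = 0;
				key.type++;
			} else if (key.objectid < (u64)-1) {
				key.offset = 0;
				key.type = 0;
				key.objectid++;
			} else {
				break;	/* wrapped the whole key space */
			}
		}
		btrfs_free_path(path);
		return ret < 0 ? ret : 0;
	}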
4578 * and fixup the path. It looks for and returns the next key in the
4579 * tree based on the current path and the min_trans parameters.
4584 * path->keep_locks should be set to 1 on the search made before
4587 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, in btrfs_find_next_key() argument
4593 WARN_ON(!path->keep_locks && !path->skip_locking); in btrfs_find_next_key()
4595 if (!path->nodes[level]) in btrfs_find_next_key()
4598 slot = path->slots[level] + 1; in btrfs_find_next_key()
4599 c = path->nodes[level]; in btrfs_find_next_key()
4606 !path->nodes[level + 1]) in btrfs_find_next_key()
4609 if (path->locks[level + 1] || path->skip_locking) { in btrfs_find_next_key()
4620 orig_lowest = path->lowest_level; in btrfs_find_next_key()
4621 btrfs_release_path(path); in btrfs_find_next_key()
4622 path->lowest_level = level; in btrfs_find_next_key()
4623 ret = btrfs_search_slot(NULL, root, &cur_key, path, in btrfs_find_next_key()
4625 path->lowest_level = orig_lowest; in btrfs_find_next_key()
4629 c = path->nodes[level]; in btrfs_find_next_key()
4630 slot = path->slots[level]; in btrfs_find_next_key()
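Because of the WARN_ON above, a caller of btrfs_find_next_key() sets path->keep_locks before the search that positions the path; a hedged sketch (wrapper name illustrative, min_trans = 0 meaning no transid filter):

	/* Read the key that follows the slot where @start is (or would be). */
	static int example_peek_next_key(struct btrfs_root *root,
					 const struct btrfs_key *start,
					 struct btrfs_key *next)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		/* Keep upper-level locks so the following key can be read. */
		path->keep_locks = 1;
		ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
		if (ret < 0)
			goto out;

		ret = btrfs_find_next_key(root, path, next, 0 /* level */, 0);
	out:
		btrfs_free_path(path);
		return ret;	/* 0: *next is valid, 1: no next key */
	}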
4652 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, in btrfs_next_old_leaf() argument
4671 ASSERT(!path->nowait); in btrfs_next_old_leaf()
4673 nritems = btrfs_header_nritems(path->nodes[0]); in btrfs_next_old_leaf()
4677 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); in btrfs_next_old_leaf()
4681 btrfs_release_path(path); in btrfs_next_old_leaf()
4683 path->keep_locks = 1; in btrfs_next_old_leaf()
4686 ret = btrfs_search_old_slot(root, &key, path, time_seq); in btrfs_next_old_leaf()
4688 if (path->need_commit_sem) { in btrfs_next_old_leaf()
4689 path->need_commit_sem = 0; in btrfs_next_old_leaf()
4691 if (path->nowait) { in btrfs_next_old_leaf()
4700 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); in btrfs_next_old_leaf()
4702 path->keep_locks = 0; in btrfs_next_old_leaf()
4707 nritems = btrfs_header_nritems(path->nodes[0]); in btrfs_next_old_leaf()
4709 * by releasing the path above we dropped all our locks. A balance in btrfs_next_old_leaf()
4712 * advance the path if there are now more items available. in btrfs_next_old_leaf()
4714 if (nritems > 0 && path->slots[0] < nritems - 1) { in btrfs_next_old_leaf()
4716 path->slots[0]++; in btrfs_next_old_leaf()
4722 * - after releasing the path above, someone has removed the item that in btrfs_next_old_leaf()
4730 * with ret > 0, the key isn't found, the path points to the slot in btrfs_next_old_leaf()
4731 * where it should be inserted, so the path->slots[0] item must be the in btrfs_next_old_leaf()
4734 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { in btrfs_next_old_leaf()
4740 if (!path->nodes[level]) { in btrfs_next_old_leaf()
4745 slot = path->slots[level] + 1; in btrfs_next_old_leaf()
4746 c = path->nodes[level]; in btrfs_next_old_leaf()
4763 if (path->locks[level]) { in btrfs_next_old_leaf()
4764 btrfs_tree_read_unlock(path->nodes[i]); in btrfs_next_old_leaf()
4765 path->locks[i] = 0; in btrfs_next_old_leaf()
4767 free_extent_buffer(path->nodes[i]); in btrfs_next_old_leaf()
4768 path->nodes[i] = NULL; in btrfs_next_old_leaf()
4772 ret = read_block_for_search(root, path, &next, level, in btrfs_next_old_leaf()
4774 if (ret == -EAGAIN && !path->nowait) in btrfs_next_old_leaf()
4778 btrfs_release_path(path); in btrfs_next_old_leaf()
4782 if (!path->skip_locking) { in btrfs_next_old_leaf()
4784 if (!ret && path->nowait) { in btrfs_next_old_leaf()
4797 btrfs_release_path(path); in btrfs_next_old_leaf()
4806 path->slots[level] = slot; in btrfs_next_old_leaf()
4809 path->nodes[level] = next; in btrfs_next_old_leaf()
4810 path->slots[level] = 0; in btrfs_next_old_leaf()
4811 if (!path->skip_locking) in btrfs_next_old_leaf()
4812 path->locks[level] = BTRFS_READ_LOCK; in btrfs_next_old_leaf()
4816 ret = read_block_for_search(root, path, &next, level, in btrfs_next_old_leaf()
4818 if (ret == -EAGAIN && !path->nowait) in btrfs_next_old_leaf()
4822 btrfs_release_path(path); in btrfs_next_old_leaf()
4826 if (!path->skip_locking) { in btrfs_next_old_leaf()
4827 if (path->nowait) { in btrfs_next_old_leaf()
4839 unlock_up(path, 0, 1, 0, NULL); in btrfs_next_old_leaf()
4843 path->need_commit_sem = 1; in btrfs_next_old_leaf()
4844 ret2 = finish_need_commit_sem_search(path); in btrfs_next_old_leaf()
4860 struct btrfs_path *path, u64 min_objectid, in btrfs_previous_item() argument
4869 if (path->slots[0] == 0) { in btrfs_previous_item()
4870 ret = btrfs_prev_leaf(root, path); in btrfs_previous_item()
4874 path->slots[0]--; in btrfs_previous_item()
4876 leaf = path->nodes[0]; in btrfs_previous_item()
4880 if (path->slots[0] == nritems) in btrfs_previous_item()
4881 path->slots[0]--; in btrfs_previous_item()
4883 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_previous_item()
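A hedged sketch of the backward walk btrfs_previous_item() is used for (the wrapper name and the extent-data filter are illustrative):

	static int example_walk_backwards(struct btrfs_root *root, u64 ino,
					  u64 start_offset)
	{
		struct btrfs_path *path;
		struct btrfs_key key = { .objectid = ino,
					 .type = BTRFS_EXTENT_DATA_KEY,
					 .offset = start_offset };
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		while (1) {
			/* Steps to the previous item with this objectid and type,
			 * crossing leaves via btrfs_prev_leaf() when needed. */
			ret = btrfs_previous_item(root, path, ino,
						  BTRFS_EXTENT_DATA_KEY);
			if (ret)	/* 1: nothing earlier, <0: error */
				break;

			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			/* ... process the extent item at key.offset ... */
		}
		if (ret == 1)
			ret = 0;
	out:
		btrfs_free_path(path);
		return ret;
	}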
4902 struct btrfs_path *path, u64 min_objectid) in btrfs_previous_extent_item() argument
4910 if (path->slots[0] == 0) { in btrfs_previous_extent_item()
4911 ret = btrfs_prev_leaf(root, path); in btrfs_previous_extent_item()
4915 path->slots[0]--; in btrfs_previous_extent_item()
4917 leaf = path->nodes[0]; in btrfs_previous_extent_item()
4921 if (path->slots[0] == nritems) in btrfs_previous_extent_item()
4922 path->slots[0]--; in btrfs_previous_extent_item()
4924 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_previous_extent_item()