Lines Matching +full:no +full:- +full:memory +full:- +full:wc
1 // SPDX-License-Identifier: GPL-2.0
20 #include "tree-log.h"
21 #include "disk-io.h"
22 #include "print-tree.h"
26 #include "free-space-cache.h"
27 #include "free-space-tree.h"
30 #include "ref-verify.h"
31 #include "space-info.h"
32 #include "block-rsv.h"
33 #include "delalloc-space.h"
34 #include "block-group.h"
36 #include "rcu-string.h"
38 #include "dev-replace.h"
63 return (cache->flags & bits) == bits; in block_group_bits()
69 u64 end = start + num_bytes - 1; in btrfs_add_excluded_extent()
70 set_extent_bits(&fs_info->excluded_extents, start, end, in btrfs_add_excluded_extent()
77 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_free_excluded_extents()
80 start = cache->start; in btrfs_free_excluded_extents()
81 end = start + cache->length - 1; in btrfs_free_excluded_extents()
83 clear_extent_bits(&fs_info->excluded_extents, start, end, in btrfs_free_excluded_extents()
96 return -ENOMEM; in btrfs_lookup_data_extent()
101 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); in btrfs_lookup_data_extent()
135 offset = fs_info->nodesize; in btrfs_lookup_extent_info()
141 return -ENOMEM; in btrfs_lookup_extent_info()
144 path->skip_locking = 1; in btrfs_lookup_extent_info()
145 path->search_commit_root = 1; in btrfs_lookup_extent_info()
156 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); in btrfs_lookup_extent_info()
161 if (path->slots[0]) { in btrfs_lookup_extent_info()
162 path->slots[0]--; in btrfs_lookup_extent_info()
163 btrfs_item_key_to_cpu(path->nodes[0], &key, in btrfs_lookup_extent_info()
164 path->slots[0]); in btrfs_lookup_extent_info()
167 key.offset == fs_info->nodesize) in btrfs_lookup_extent_info()
173 leaf = path->nodes[0]; in btrfs_lookup_extent_info()
174 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in btrfs_lookup_extent_info()
176 ei = btrfs_item_ptr(leaf, path->slots[0], in btrfs_lookup_extent_info()
181 ret = -EINVAL; in btrfs_lookup_extent_info()
201 delayed_refs = &trans->transaction->delayed_refs; in btrfs_lookup_extent_info()
202 spin_lock(&delayed_refs->lock); in btrfs_lookup_extent_info()
205 if (!mutex_trylock(&head->mutex)) { in btrfs_lookup_extent_info()
206 refcount_inc(&head->refs); in btrfs_lookup_extent_info()
207 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
215 mutex_lock(&head->mutex); in btrfs_lookup_extent_info()
216 mutex_unlock(&head->mutex); in btrfs_lookup_extent_info()
220 spin_lock(&head->lock); in btrfs_lookup_extent_info()
221 if (head->extent_op && head->extent_op->update_flags) in btrfs_lookup_extent_info()
222 extent_flags |= head->extent_op->flags_to_set; in btrfs_lookup_extent_info()
226 num_refs += head->ref_mod; in btrfs_lookup_extent_info()
227 spin_unlock(&head->lock); in btrfs_lookup_extent_info()
228 mutex_unlock(&head->mutex); in btrfs_lookup_extent_info()
230 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
257 * for pointers in non-shared tree blocks. For a given pointer in a block,
260 * b-tree searching. The full back refs is for pointers in tree blocks not
303 * - multiple snapshots, subvolumes, or different generations in one subvol
304 * - different files inside a single subvolume
305 * - different offsets inside a file (bookend extents in file.c)
309 * - Objectid of the subvolume root
310 * - objectid of the file holding the reference
311 * - original offset in the file
312 * - how many bookend extents
319 * - number of pointers in the tree leaf
336 * - Different subvolumes
368 ASSERT(eb->fs_info); in btrfs_get_extent_inline_ref_type()
374 IS_ALIGNED(offset, eb->fs_info->sectorsize)) in btrfs_get_extent_inline_ref_type()
381 ASSERT(eb->fs_info); in btrfs_get_extent_inline_ref_type()
387 IS_ALIGNED(offset, eb->fs_info->sectorsize)) in btrfs_get_extent_inline_ref_type()
397 btrfs_err(eb->fs_info, in btrfs_get_extent_inline_ref_type()
399 eb->start, (unsigned long)iref, type); in btrfs_get_extent_inline_ref_type()
446 struct btrfs_root *root = trans->fs_info->extent_root; in lookup_extent_data_ref()
453 int err = -ENOENT; in lookup_extent_data_ref()
466 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in lookup_extent_data_ref()
478 leaf = path->nodes[0]; in lookup_extent_data_ref()
481 if (path->slots[0] >= nritems) { in lookup_extent_data_ref()
488 leaf = path->nodes[0]; in lookup_extent_data_ref()
493 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in lookup_extent_data_ref()
498 ref = btrfs_item_ptr(leaf, path->slots[0], in lookup_extent_data_ref()
510 path->slots[0]++; in lookup_extent_data_ref()
522 struct btrfs_root *root = trans->fs_info->extent_root; in insert_extent_data_ref()
542 if (ret && ret != -EEXIST) in insert_extent_data_ref()
545 leaf = path->nodes[0]; in insert_extent_data_ref()
548 ref = btrfs_item_ptr(leaf, path->slots[0], in insert_extent_data_ref()
559 while (ret == -EEXIST) { in insert_extent_data_ref()
560 ref = btrfs_item_ptr(leaf, path->slots[0], in insert_extent_data_ref()
569 if (ret && ret != -EEXIST) in insert_extent_data_ref()
572 leaf = path->nodes[0]; in insert_extent_data_ref()
574 ref = btrfs_item_ptr(leaf, path->slots[0], in insert_extent_data_ref()
606 leaf = path->nodes[0]; in remove_extent_data_ref()
607 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in remove_extent_data_ref()
610 ref1 = btrfs_item_ptr(leaf, path->slots[0], in remove_extent_data_ref()
614 ref2 = btrfs_item_ptr(leaf, path->slots[0], in remove_extent_data_ref()
618 btrfs_print_v0_err(trans->fs_info); in remove_extent_data_ref()
619 btrfs_abort_transaction(trans, -EINVAL); in remove_extent_data_ref()
620 return -EINVAL; in remove_extent_data_ref()
626 num_refs -= refs_to_drop; in remove_extent_data_ref()
629 ret = btrfs_del_item(trans, trans->fs_info->extent_root, path); in remove_extent_data_ref()
651 leaf = path->nodes[0]; in extent_data_ref_count()
652 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in extent_data_ref_count()
663 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset); in extent_data_ref_count()
670 ref1 = btrfs_item_ptr(leaf, path->slots[0], in extent_data_ref_count()
674 ref2 = btrfs_item_ptr(leaf, path->slots[0], in extent_data_ref_count()
688 struct btrfs_root *root = trans->fs_info->extent_root; in lookup_tree_block_ref()
701 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in lookup_tree_block_ref()
703 ret = -ENOENT; in lookup_tree_block_ref()
724 ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root, in insert_tree_block_ref()
752 if (!path->nodes[level]) in find_next_key()
754 if (path->slots[level] + 1 >= in find_next_key()
755 btrfs_header_nritems(path->nodes[level])) in find_next_key()
758 btrfs_item_key_to_cpu(path->nodes[level], key, in find_next_key()
759 path->slots[level] + 1); in find_next_key()
761 btrfs_node_key_to_cpu(path->nodes[level], key, in find_next_key()
762 path->slots[level] + 1); in find_next_key()
773 * should be inserted, and -ENOENT is returned.
776 * points to the extent item, and -EAGAIN is returned.
789 struct btrfs_fs_info *fs_info = trans->fs_info; in lookup_inline_extent_backref()
790 struct btrfs_root *root = fs_info->extent_root; in lookup_inline_extent_backref()
814 path->search_for_extension = 1; in lookup_inline_extent_backref()
815 path->keep_locks = 1; in lookup_inline_extent_backref()
817 extra_size = -1; in lookup_inline_extent_backref()
841 if (path->slots[0]) { in lookup_inline_extent_backref()
842 path->slots[0]--; in lookup_inline_extent_backref()
843 btrfs_item_key_to_cpu(path->nodes[0], &key, in lookup_inline_extent_backref()
844 path->slots[0]); in lookup_inline_extent_backref()
860 err = -ENOENT; in lookup_inline_extent_backref()
863 err = -EIO; in lookup_inline_extent_backref()
867 leaf = path->nodes[0]; in lookup_inline_extent_backref()
868 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in lookup_inline_extent_backref()
870 err = -EINVAL; in lookup_inline_extent_backref()
876 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in lookup_inline_extent_backref()
892 err = -ENOENT; in lookup_inline_extent_backref()
901 err = -EUCLEAN; in lookup_inline_extent_backref()
914 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in lookup_inline_extent_backref()
944 if (err == -ENOENT && insert) { in lookup_inline_extent_backref()
947 err = -EAGAIN; in lookup_inline_extent_backref()
952 * there is no corresponding back ref item. in lookup_inline_extent_backref()
959 err = -EAGAIN; in lookup_inline_extent_backref()
966 path->keep_locks = 0; in lookup_inline_extent_backref()
967 path->search_for_extension = 0; in lookup_inline_extent_backref()
993 leaf = path->nodes[0]; in setup_inline_extent_backref()
994 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in setup_inline_extent_backref()
995 item_offset = (unsigned long)iref - (unsigned long)ei; in setup_inline_extent_backref()
1002 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in setup_inline_extent_backref()
1010 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]); in setup_inline_extent_backref()
1011 if (ptr < end - size) in setup_inline_extent_backref()
1013 end - size - ptr); in setup_inline_extent_backref()
1019 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in setup_inline_extent_backref()
1048 if (ret != -ENOENT) in lookup_extent_backref()
1074 struct extent_buffer *leaf = path->nodes[0]; in update_inline_extent_backref()
1085 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in update_inline_extent_backref()
1101 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in update_inline_extent_backref()
1108 BUG_ON(refs_to_mod != -1); in update_inline_extent_backref()
1111 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod); in update_inline_extent_backref()
1122 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in update_inline_extent_backref()
1127 end - ptr - size); in update_inline_extent_backref()
1128 item_size -= size; in update_inline_extent_backref()
1154 btrfs_crit(trans->fs_info, in insert_inline_extent_backref()
1159 btrfs_crit(trans->fs_info, in insert_inline_extent_backref()
1160 "path->slots[0]=%d path->nodes[0]:", path->slots[0]); in insert_inline_extent_backref()
1161 btrfs_print_leaf(path->nodes[0]); in insert_inline_extent_backref()
1163 return -EUCLEAN; in insert_inline_extent_backref()
1167 } else if (ret == -ENOENT) { in insert_inline_extent_backref()
1168 setup_inline_extent_backref(trans->fs_info, path, iref, parent, in insert_inline_extent_backref()
1185 update_inline_extent_backref(path, iref, -refs_to_drop, NULL, in remove_extent_backref()
1192 ret = btrfs_del_item(trans, trans->fs_info->extent_root, path); in remove_extent_backref()
1205 len -= aligned_start - start; in btrfs_issue_discard()
1222 u64 size = sb_start - start; in btrfs_issue_discard()
1234 start += sb_end - start; in btrfs_issue_discard()
1239 bytes_left = end - start; in btrfs_issue_discard()
1248 else if (ret != -EOPNOTSUPP) in btrfs_issue_discard()
1257 bytes_left = end - start; in btrfs_issue_discard()
1271 struct btrfs_device *dev = stripe->dev; in do_discard_extent()
1272 struct btrfs_fs_info *fs_info = dev->fs_info; in do_discard_extent()
1273 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in do_discard_extent()
1274 u64 phys = stripe->physical; in do_discard_extent()
1275 u64 len = stripe->length; in do_discard_extent()
1288 dev != dev_replace->srcdev) in do_discard_extent()
1294 ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len, in do_discard_extent()
1297 } else if (blk_queue_discard(bdev_get_queue(stripe->dev->bdev))) { in do_discard_extent()
1298 ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded); in do_discard_extent()
1328 num_bytes = end - cur; in btrfs_discard_extent()
1333 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or in btrfs_discard_extent()
1334 * -EOPNOTSUPP. For any such error, @num_bytes is not updated, in btrfs_discard_extent()
1340 stripe = bbio->stripes; in btrfs_discard_extent()
1341 for (i = 0; i < bbio->num_stripes; i++, stripe++) { in btrfs_discard_extent()
1343 struct btrfs_device *device = stripe->dev; in btrfs_discard_extent()
1345 if (!device->bdev) { in btrfs_discard_extent()
1350 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) in btrfs_discard_extent()
1356 } else if (ret != -EOPNOTSUPP) { in btrfs_discard_extent()
1358 * Logic errors or -ENOMEM, or -EIO, but in btrfs_discard_extent()
1385 if (ret == -EOPNOTSUPP) in btrfs_discard_extent()
1390 /* Can return -ENOMEM */
1394 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_inc_extent_ref()
1397 ASSERT(generic_ref->type != BTRFS_REF_NOT_SET && in btrfs_inc_extent_ref()
1398 generic_ref->action); in btrfs_inc_extent_ref()
1399 BUG_ON(generic_ref->type == BTRFS_REF_METADATA && in btrfs_inc_extent_ref()
1400 generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID); in btrfs_inc_extent_ref()
1402 if (generic_ref->type == BTRFS_REF_METADATA) in btrfs_inc_extent_ref()
1413 * __btrfs_inc_extent_ref - insert backreference for a given extent
1432 * this can be either one of the well-known metadata trees or
1459 u64 bytenr = node->bytenr; in __btrfs_inc_extent_ref()
1460 u64 num_bytes = node->num_bytes; in __btrfs_inc_extent_ref()
1466 return -ENOMEM; in __btrfs_inc_extent_ref()
1472 if ((ret < 0 && ret != -EAGAIN) || !ret) in __btrfs_inc_extent_ref()
1476 * Ok we had -EAGAIN which means we didn't have space to insert and in __btrfs_inc_extent_ref()
1480 leaf = path->nodes[0]; in __btrfs_inc_extent_ref()
1481 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in __btrfs_inc_extent_ref()
1482 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in __btrfs_inc_extent_ref()
1520 ins.objectid = node->bytenr; in run_delayed_data_ref()
1521 ins.offset = node->num_bytes; in run_delayed_data_ref()
1525 trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action); in run_delayed_data_ref()
1527 if (node->type == BTRFS_SHARED_DATA_REF_KEY) in run_delayed_data_ref()
1528 parent = ref->parent; in run_delayed_data_ref()
1529 ref_root = ref->root; in run_delayed_data_ref()
1531 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { in run_delayed_data_ref()
1533 flags |= extent_op->flags_to_set; in run_delayed_data_ref()
1535 flags, ref->objectid, in run_delayed_data_ref()
1536 ref->offset, &ins, in run_delayed_data_ref()
1537 node->ref_mod); in run_delayed_data_ref()
1538 } else if (node->action == BTRFS_ADD_DELAYED_REF) { in run_delayed_data_ref()
1540 ref->objectid, ref->offset, in run_delayed_data_ref()
1541 node->ref_mod, extent_op); in run_delayed_data_ref()
1542 } else if (node->action == BTRFS_DROP_DELAYED_REF) { in run_delayed_data_ref()
1544 ref_root, ref->objectid, in run_delayed_data_ref()
1545 ref->offset, node->ref_mod, in run_delayed_data_ref()
1558 if (extent_op->update_flags) { in __run_delayed_extent_op()
1559 flags |= extent_op->flags_to_set; in __run_delayed_extent_op()
1563 if (extent_op->update_key) { in __run_delayed_extent_op()
1567 btrfs_set_tree_block_key(leaf, bi, &extent_op->key); in __run_delayed_extent_op()
1575 struct btrfs_fs_info *fs_info = trans->fs_info; in run_delayed_extent_op()
1583 int metadata = !extent_op->is_data; in run_delayed_extent_op()
1593 return -ENOMEM; in run_delayed_extent_op()
1595 key.objectid = head->bytenr; in run_delayed_extent_op()
1599 key.offset = extent_op->level; in run_delayed_extent_op()
1602 key.offset = head->num_bytes; in run_delayed_extent_op()
1606 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1); in run_delayed_extent_op()
1613 if (path->slots[0] > 0) { in run_delayed_extent_op()
1614 path->slots[0]--; in run_delayed_extent_op()
1615 btrfs_item_key_to_cpu(path->nodes[0], &key, in run_delayed_extent_op()
1616 path->slots[0]); in run_delayed_extent_op()
1617 if (key.objectid == head->bytenr && in run_delayed_extent_op()
1619 key.offset == head->num_bytes) in run_delayed_extent_op()
1626 key.objectid = head->bytenr; in run_delayed_extent_op()
1627 key.offset = head->num_bytes; in run_delayed_extent_op()
1632 err = -EIO; in run_delayed_extent_op()
1637 leaf = path->nodes[0]; in run_delayed_extent_op()
1638 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in run_delayed_extent_op()
1641 err = -EINVAL; in run_delayed_extent_op()
1647 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in run_delayed_extent_op()
1667 trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action); in run_delayed_tree_ref()
1669 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) in run_delayed_tree_ref()
1670 parent = ref->parent; in run_delayed_tree_ref()
1671 ref_root = ref->root; in run_delayed_tree_ref()
1673 if (node->ref_mod != 1) { in run_delayed_tree_ref()
1674 btrfs_err(trans->fs_info, in run_delayed_tree_ref()
1676 node->bytenr, node->ref_mod, node->action, ref_root, in run_delayed_tree_ref()
1678 return -EIO; in run_delayed_tree_ref()
1680 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { in run_delayed_tree_ref()
1681 BUG_ON(!extent_op || !extent_op->update_flags); in run_delayed_tree_ref()
1683 } else if (node->action == BTRFS_ADD_DELAYED_REF) { in run_delayed_tree_ref()
1685 ref->level, 0, 1, extent_op); in run_delayed_tree_ref()
1686 } else if (node->action == BTRFS_DROP_DELAYED_REF) { in run_delayed_tree_ref()
1688 ref->level, 0, 1, extent_op); in run_delayed_tree_ref()
1705 btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); in run_one_delayed_ref()
1709 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || in run_one_delayed_ref()
1710 node->type == BTRFS_SHARED_BLOCK_REF_KEY) in run_one_delayed_ref()
1713 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || in run_one_delayed_ref()
1714 node->type == BTRFS_SHARED_DATA_REF_KEY) in run_one_delayed_ref()
1720 btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); in run_one_delayed_ref()
1729 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) in select_delayed_ref()
1738 if (!list_empty(&head->ref_add_list)) in select_delayed_ref()
1739 return list_first_entry(&head->ref_add_list, in select_delayed_ref()
1742 ref = rb_entry(rb_first_cached(&head->ref_tree), in select_delayed_ref()
1744 ASSERT(list_empty(&ref->add_list)); in select_delayed_ref()
1751 spin_lock(&delayed_refs->lock); in unselect_delayed_ref_head()
1752 head->processing = 0; in unselect_delayed_ref_head()
1753 delayed_refs->num_heads_ready++; in unselect_delayed_ref_head()
1754 spin_unlock(&delayed_refs->lock); in unselect_delayed_ref_head()
1761 struct btrfs_delayed_extent_op *extent_op = head->extent_op; in cleanup_extent_op()
1766 if (head->must_insert_reserved) { in cleanup_extent_op()
1767 head->extent_op = NULL; in cleanup_extent_op()
1783 head->extent_op = NULL; in run_and_cleanup_extent_op()
1784 spin_unlock(&head->lock); in run_and_cleanup_extent_op()
1800 if (head->total_ref_mod < 0 && head->is_data) { in btrfs_cleanup_ref_head_accounting()
1801 spin_lock(&delayed_refs->lock); in btrfs_cleanup_ref_head_accounting()
1802 delayed_refs->pending_csums -= head->num_bytes; in btrfs_cleanup_ref_head_accounting()
1803 spin_unlock(&delayed_refs->lock); in btrfs_cleanup_ref_head_accounting()
1804 nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes); in btrfs_cleanup_ref_head_accounting()
1814 struct btrfs_fs_info *fs_info = trans->fs_info; in cleanup_ref_head()
1818 delayed_refs = &trans->transaction->delayed_refs; in cleanup_ref_head()
1830 * Need to drop our head ref lock and re-acquire the delayed ref lock in cleanup_ref_head()
1831 * and then re-check to make sure nobody got added. in cleanup_ref_head()
1833 spin_unlock(&head->lock); in cleanup_ref_head()
1834 spin_lock(&delayed_refs->lock); in cleanup_ref_head()
1835 spin_lock(&head->lock); in cleanup_ref_head()
1836 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) { in cleanup_ref_head()
1837 spin_unlock(&head->lock); in cleanup_ref_head()
1838 spin_unlock(&delayed_refs->lock); in cleanup_ref_head()
1842 spin_unlock(&head->lock); in cleanup_ref_head()
1843 spin_unlock(&delayed_refs->lock); in cleanup_ref_head()
1845 if (head->must_insert_reserved) { in cleanup_ref_head()
1846 btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1); in cleanup_ref_head()
1847 if (head->is_data) { in cleanup_ref_head()
1848 ret = btrfs_del_csums(trans, fs_info->csum_root, in cleanup_ref_head()
1849 head->bytenr, head->num_bytes); in cleanup_ref_head()
1865 &trans->transaction->delayed_refs; in btrfs_obtain_ref_head()
1869 spin_lock(&delayed_refs->lock); in btrfs_obtain_ref_head()
1872 spin_unlock(&delayed_refs->lock); in btrfs_obtain_ref_head()
1881 spin_unlock(&delayed_refs->lock); in btrfs_obtain_ref_head()
1888 if (ret == -EAGAIN) in btrfs_obtain_ref_head()
1889 head = ERR_PTR(-EAGAIN); in btrfs_obtain_ref_head()
1898 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_run_delayed_refs_for_head()
1905 delayed_refs = &trans->transaction->delayed_refs; in btrfs_run_delayed_refs_for_head()
1907 lockdep_assert_held(&locked_ref->mutex); in btrfs_run_delayed_refs_for_head()
1908 lockdep_assert_held(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
1911 if (ref->seq && in btrfs_run_delayed_refs_for_head()
1912 btrfs_check_delayed_seq(fs_info, ref->seq)) { in btrfs_run_delayed_refs_for_head()
1913 spin_unlock(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
1915 return -EAGAIN; in btrfs_run_delayed_refs_for_head()
1919 ref->in_tree = 0; in btrfs_run_delayed_refs_for_head()
1920 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); in btrfs_run_delayed_refs_for_head()
1921 RB_CLEAR_NODE(&ref->ref_node); in btrfs_run_delayed_refs_for_head()
1922 if (!list_empty(&ref->add_list)) in btrfs_run_delayed_refs_for_head()
1923 list_del(&ref->add_list); in btrfs_run_delayed_refs_for_head()
1928 switch (ref->action) { in btrfs_run_delayed_refs_for_head()
1931 locked_ref->ref_mod -= ref->ref_mod; in btrfs_run_delayed_refs_for_head()
1934 locked_ref->ref_mod += ref->ref_mod; in btrfs_run_delayed_refs_for_head()
1939 atomic_dec(&delayed_refs->num_entries); in btrfs_run_delayed_refs_for_head()
1945 must_insert_reserved = locked_ref->must_insert_reserved; in btrfs_run_delayed_refs_for_head()
1946 locked_ref->must_insert_reserved = 0; in btrfs_run_delayed_refs_for_head()
1948 extent_op = locked_ref->extent_op; in btrfs_run_delayed_refs_for_head()
1949 locked_ref->extent_op = NULL; in btrfs_run_delayed_refs_for_head()
1950 spin_unlock(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
1967 spin_lock(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
1976 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
1981 struct btrfs_fs_info *fs_info = trans->fs_info; in __btrfs_run_delayed_refs()
1989 delayed_refs = &trans->transaction->delayed_refs; in __btrfs_run_delayed_refs()
1994 if (PTR_ERR(locked_ref) == -EAGAIN) { in __btrfs_run_delayed_refs()
2006 * finish. If we merged anything we need to re-loop so we can in __btrfs_run_delayed_refs()
2014 spin_lock(&locked_ref->lock); in __btrfs_run_delayed_refs()
2019 if (ret < 0 && ret != -EAGAIN) { in __btrfs_run_delayed_refs()
2042 * returned -EAGAIN, meaning we need to select another head in __btrfs_run_delayed_refs()
2047 } while ((nr != -1 && count < nr) || locked_ref); in __btrfs_run_delayed_refs()
2052 * accounting, no actual extent tree updates. in __btrfs_run_delayed_refs()
2062 spin_lock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2063 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; in __btrfs_run_delayed_refs()
2064 fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */ in __btrfs_run_delayed_refs()
2065 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2078 struct rb_node *n = root->rb_node; in find_middle()
2087 first = entry->bytenr; in find_middle()
2092 last = entry->bytenr; in find_middle()
2094 n = root->rb_node; in find_middle()
2098 WARN_ON(!entry->in_tree); in find_middle()
2100 middle = entry->bytenr; in find_middle()
2103 n = n->rb_left; in find_middle()
2105 n = n->rb_right; in find_middle()
2107 alt = 1 - alt; in find_middle()
2126 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_run_delayed_refs()
2131 int run_all = count == (unsigned long)-1; in btrfs_run_delayed_refs()
2137 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) in btrfs_run_delayed_refs()
2140 delayed_refs = &trans->transaction->delayed_refs; in btrfs_run_delayed_refs()
2142 count = delayed_refs->num_heads_ready; in btrfs_run_delayed_refs()
2146 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); in btrfs_run_delayed_refs()
2157 spin_lock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2158 node = rb_first_cached(&delayed_refs->href_root); in btrfs_run_delayed_refs()
2160 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2165 refcount_inc(&head->refs); in btrfs_run_delayed_refs()
2166 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2169 mutex_lock(&head->mutex); in btrfs_run_delayed_refs()
2170 mutex_unlock(&head->mutex); in btrfs_run_delayed_refs()
2189 return -ENOMEM; in btrfs_set_disk_extent_flags()
2191 extent_op->flags_to_set = flags; in btrfs_set_disk_extent_flags()
2192 extent_op->update_flags = true; in btrfs_set_disk_extent_flags()
2193 extent_op->update_key = false; in btrfs_set_disk_extent_flags()
2194 extent_op->is_data = is_data ? true : false; in btrfs_set_disk_extent_flags()
2195 extent_op->level = level; in btrfs_set_disk_extent_flags()
2197 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); in btrfs_set_disk_extent_flags()
2215 spin_lock(&root->fs_info->trans_lock); in check_delayed_ref()
2216 cur_trans = root->fs_info->running_transaction; in check_delayed_ref()
2218 refcount_inc(&cur_trans->use_count); in check_delayed_ref()
2219 spin_unlock(&root->fs_info->trans_lock); in check_delayed_ref()
2223 delayed_refs = &cur_trans->delayed_refs; in check_delayed_ref()
2224 spin_lock(&delayed_refs->lock); in check_delayed_ref()
2227 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2232 if (!mutex_trylock(&head->mutex)) { in check_delayed_ref()
2233 refcount_inc(&head->refs); in check_delayed_ref()
2234 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2242 mutex_lock(&head->mutex); in check_delayed_ref()
2243 mutex_unlock(&head->mutex); in check_delayed_ref()
2246 return -EAGAIN; in check_delayed_ref()
2248 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2250 spin_lock(&head->lock); in check_delayed_ref()
2255 for (node = rb_first_cached(&head->ref_tree); node; in check_delayed_ref()
2259 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { in check_delayed_ref()
2270 if (data_ref->root != root->root_key.objectid || in check_delayed_ref()
2271 data_ref->objectid != objectid || in check_delayed_ref()
2272 data_ref->offset != offset) { in check_delayed_ref()
2277 spin_unlock(&head->lock); in check_delayed_ref()
2278 mutex_unlock(&head->mutex); in check_delayed_ref()
2288 struct btrfs_fs_info *fs_info = root->fs_info; in check_committed_ref()
2289 struct btrfs_root *extent_root = fs_info->extent_root; in check_committed_ref()
2300 key.offset = (u64)-1; in check_committed_ref()
2308 ret = -ENOENT; in check_committed_ref()
2309 if (path->slots[0] == 0) in check_committed_ref()
2312 path->slots[0]--; in check_committed_ref()
2313 leaf = path->nodes[0]; in check_committed_ref()
2314 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in check_committed_ref()
2320 item_size = btrfs_item_size_nr(leaf, path->slots[0]); in check_committed_ref()
2321 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in check_committed_ref()
2334 btrfs_root_last_snapshot(&root->root_item))) in check_committed_ref()
2344 ref = (struct btrfs_extent_data_ref *)(&iref->offset); in check_committed_ref()
2348 root->root_key.objectid || in check_committed_ref()
2366 return -ENOMEM; in btrfs_cross_ref_exist()
2371 if (ret && ret != -ENOENT) in btrfs_cross_ref_exist()
2375 } while (ret == -EAGAIN); in btrfs_cross_ref_exist()
2379 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in btrfs_cross_ref_exist()
2389 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_mod_ref()
2411 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) in __btrfs_mod_ref()
2415 parent = buf->start; in __btrfs_mod_ref()
2438 key.offset -= btrfs_file_extent_offset(buf, fi); in __btrfs_mod_ref()
2441 generic_ref.real_root = root->root_key.objectid; in __btrfs_mod_ref()
2453 num_bytes = fs_info->nodesize; in __btrfs_mod_ref()
2456 generic_ref.real_root = root->root_key.objectid; in __btrfs_mod_ref()
2457 btrfs_init_tree_ref(&generic_ref, level - 1, ref_root); in __btrfs_mod_ref()
2486 struct btrfs_fs_info *fs_info = root->fs_info; in get_alloc_profile_by_root()
2492 else if (root == fs_info->chunk_root) in get_alloc_profile_by_root()
2506 spin_lock(&fs_info->block_group_cache_lock); in first_logical_byte()
2507 bytenr = fs_info->first_logical_byte; in first_logical_byte()
2508 spin_unlock(&fs_info->block_group_cache_lock); in first_logical_byte()
2510 if (bytenr < (u64)-1) in first_logical_byte()
2517 bytenr = cache->start; in first_logical_byte()
2527 struct btrfs_fs_info *fs_info = cache->fs_info; in pin_down_extent()
2529 spin_lock(&cache->space_info->lock); in pin_down_extent()
2530 spin_lock(&cache->lock); in pin_down_extent()
2531 cache->pinned += num_bytes; in pin_down_extent()
2532 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, in pin_down_extent()
2535 cache->reserved -= num_bytes; in pin_down_extent()
2536 cache->space_info->bytes_reserved -= num_bytes; in pin_down_extent()
2538 spin_unlock(&cache->lock); in pin_down_extent()
2539 spin_unlock(&cache->space_info->lock); in pin_down_extent()
2541 set_extent_dirty(&trans->transaction->pinned_extents, bytenr, in pin_down_extent()
2542 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); in pin_down_extent()
2551 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); in btrfs_pin_extent()
2569 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); in btrfs_pin_extent_for_log_replay()
2571 return -EINVAL; in btrfs_pin_extent_for_log_replay()
2605 return -EINVAL; in __exclude_logged_extent()
2624 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_exclude_logged_extents()
2657 atomic_inc(&bg->reservations); in btrfs_inc_block_group_reservations()
2674 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in fetch_cluster_info()
2675 ret = &fs_info->meta_alloc_cluster; in fetch_cluster_info()
2680 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && in fetch_cluster_info()
2683 ret = &fs_info->data_alloc_cluster; in fetch_cluster_info()
2695 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in unpin_extent_range()
2705 start >= cache->start + cache->length) { in unpin_extent_range()
2713 cache->space_info, in unpin_extent_range()
2718 len = cache->start + cache->length - start; in unpin_extent_range()
2719 len = min(len, end + 1 - start); in unpin_extent_range()
2721 down_read(&fs_info->commit_root_sem); in unpin_extent_range()
2722 if (start < cache->last_byte_to_unpin && return_free_space) { in unpin_extent_range()
2723 u64 add_len = min(len, cache->last_byte_to_unpin - start); in unpin_extent_range()
2727 up_read(&fs_info->commit_root_sem); in unpin_extent_range()
2731 space_info = cache->space_info; in unpin_extent_range()
2739 if (cluster && cluster->fragmented && in unpin_extent_range()
2741 spin_lock(&cluster->lock); in unpin_extent_range()
2742 cluster->fragmented = 0; in unpin_extent_range()
2743 spin_unlock(&cluster->lock); in unpin_extent_range()
2746 spin_lock(&space_info->lock); in unpin_extent_range()
2747 spin_lock(&cache->lock); in unpin_extent_range()
2748 cache->pinned -= len; in unpin_extent_range()
2749 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); in unpin_extent_range()
2750 space_info->max_extent_size = 0; in unpin_extent_range()
2751 if (cache->ro) { in unpin_extent_range()
2752 space_info->bytes_readonly += len; in unpin_extent_range()
2756 space_info->bytes_zone_unusable += len; in unpin_extent_range()
2759 spin_unlock(&cache->lock); in unpin_extent_range()
2761 global_rsv->space_info == space_info) { in unpin_extent_range()
2764 spin_lock(&global_rsv->lock); in unpin_extent_range()
2765 if (!global_rsv->full) { in unpin_extent_range()
2766 to_add = min(len, global_rsv->size - in unpin_extent_range()
2767 global_rsv->reserved); in unpin_extent_range()
2768 global_rsv->reserved += to_add; in unpin_extent_range()
2771 if (global_rsv->reserved >= global_rsv->size) in unpin_extent_range()
2772 global_rsv->full = 1; in unpin_extent_range()
2773 len -= to_add; in unpin_extent_range()
2775 spin_unlock(&global_rsv->lock); in unpin_extent_range()
2780 spin_unlock(&space_info->lock); in unpin_extent_range()
2790 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_finish_extent_commit()
2798 unpin = &trans->transaction->pinned_extents; in btrfs_finish_extent_commit()
2803 mutex_lock(&fs_info->unused_bg_unpin_mutex); in btrfs_finish_extent_commit()
2807 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in btrfs_finish_extent_commit()
2813 end + 1 - start, NULL); in btrfs_finish_extent_commit()
2817 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in btrfs_finish_extent_commit()
2823 btrfs_discard_calc_delay(&fs_info->discard_ctl); in btrfs_finish_extent_commit()
2824 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); in btrfs_finish_extent_commit()
2832 deleted_bgs = &trans->transaction->deleted_bgs; in btrfs_finish_extent_commit()
2836 ret = -EROFS; in btrfs_finish_extent_commit()
2839 block_group->start, in btrfs_finish_extent_commit()
2840 block_group->length, in btrfs_finish_extent_commit()
2843 list_del_init(&block_group->bg_list); in btrfs_finish_extent_commit()
2878 * node->bytenr = 13631488
2879 * node->num_bytes = 1048576
2903 * node->bytenr = 13631488
2904 * node->num_bytes = 1048576
2923 struct btrfs_fs_info *info = trans->fs_info; in __btrfs_free_extent()
2926 struct btrfs_root *extent_root = info->extent_root; in __btrfs_free_extent()
2937 u64 bytenr = node->bytenr; in __btrfs_free_extent()
2938 u64 num_bytes = node->num_bytes; in __btrfs_free_extent()
2944 return -ENOMEM; in __btrfs_free_extent()
2951 node->bytenr, refs_to_drop); in __btrfs_free_extent()
2952 ret = -EINVAL; in __btrfs_free_extent()
2971 extent_slot = path->slots[0]; in __btrfs_free_extent()
2973 btrfs_item_key_to_cpu(path->nodes[0], &key, in __btrfs_free_extent()
2989 if (path->slots[0] - extent_slot > 5) in __btrfs_free_extent()
2991 extent_slot--; in __btrfs_free_extent()
2997 "invalid iref, no EXTENT/METADATA_ITEM found but has inline extent ref"); in __btrfs_free_extent()
2998 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3022 &key, path, -1, 1); in __btrfs_free_extent()
3023 if (ret > 0 && skinny_metadata && path->slots[0]) { in __btrfs_free_extent()
3028 path->slots[0]--; in __btrfs_free_extent()
3029 btrfs_item_key_to_cpu(path->nodes[0], &key, in __btrfs_free_extent()
3030 path->slots[0]); in __btrfs_free_extent()
3044 &key, path, -1, 1); in __btrfs_free_extent()
3052 btrfs_print_leaf(path->nodes[0]); in __btrfs_free_extent()
3058 extent_slot = path->slots[0]; in __btrfs_free_extent()
3060 } else if (WARN_ON(ret == -ENOENT)) { in __btrfs_free_extent()
3061 btrfs_print_leaf(path->nodes[0]); in __btrfs_free_extent()
3073 leaf = path->nodes[0]; in __btrfs_free_extent()
3076 ret = -EINVAL; in __btrfs_free_extent()
3092 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3104 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3107 refs -= refs_to_drop; in __btrfs_free_extent()
3119 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found"); in __btrfs_free_extent()
3120 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3145 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3149 if (path->slots[0] != extent_slot) { in __btrfs_free_extent()
3154 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3159 * No inline ref, we must be at SHARED_* item, in __btrfs_free_extent()
3164 if (path->slots[0] != extent_slot + 1) { in __btrfs_free_extent()
3167 btrfs_abort_transaction(trans, -EUCLEAN); in __btrfs_free_extent()
3170 path->slots[0] = extent_slot; in __btrfs_free_extent()
3176 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], in __btrfs_free_extent()
3185 ret = btrfs_del_csums(trans, info->csum_root, bytenr, in __btrfs_free_extent()
3216 btrfs_crit(info, "path->slots[0]=%d extent_slot=%d", in __btrfs_free_extent()
3217 path->slots[0], extent_slot); in __btrfs_free_extent()
3218 btrfs_print_leaf(path->nodes[0]); in __btrfs_free_extent()
3222 return -EUCLEAN; in __btrfs_free_extent()
3228 * a given extent, and if there are no other delayed refs to be processed, it
3238 delayed_refs = &trans->transaction->delayed_refs; in check_ref_cleanup()
3239 spin_lock(&delayed_refs->lock); in check_ref_cleanup()
3244 spin_lock(&head->lock); in check_ref_cleanup()
3245 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) in check_ref_cleanup()
3255 if (!mutex_trylock(&head->mutex)) in check_ref_cleanup()
3259 head->processing = 0; in check_ref_cleanup()
3261 spin_unlock(&head->lock); in check_ref_cleanup()
3262 spin_unlock(&delayed_refs->lock); in check_ref_cleanup()
3264 BUG_ON(head->extent_op); in check_ref_cleanup()
3265 if (head->must_insert_reserved) in check_ref_cleanup()
3268 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); in check_ref_cleanup()
3269 mutex_unlock(&head->mutex); in check_ref_cleanup()
3273 spin_unlock(&head->lock); in check_ref_cleanup()
3276 spin_unlock(&delayed_refs->lock); in check_ref_cleanup()
3285 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_free_tree_block()
3290 buf->start, buf->len, parent); in btrfs_free_tree_block()
3292 root->root_key.objectid); in btrfs_free_tree_block()
3294 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_free_tree_block()
3297 BUG_ON(ret); /* -ENOMEM */ in btrfs_free_tree_block()
3300 if (last_ref && btrfs_header_generation(buf) == trans->transid) { in btrfs_free_tree_block()
3304 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_free_tree_block()
3305 ret = check_ref_cleanup(trans, buf->start); in btrfs_free_tree_block()
3307 btrfs_redirty_list_add(trans->transaction, buf); in btrfs_free_tree_block()
3312 cache = btrfs_lookup_block_group(fs_info, buf->start); in btrfs_free_tree_block()
3315 pin_down_extent(trans, cache, buf->start, buf->len, 1); in btrfs_free_tree_block()
3323 * So we must make sure no one reuses this leaf's extent before in btrfs_free_tree_block()
3328 * We are safe from races here because at this point no other in btrfs_free_tree_block()
3335 test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) in btrfs_free_tree_block()
3339 btrfs_redirty_list_add(trans->transaction, buf); in btrfs_free_tree_block()
3340 pin_down_extent(trans, cache, buf->start, buf->len, 1); in btrfs_free_tree_block()
3345 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); in btrfs_free_tree_block()
3347 btrfs_add_free_space(cache, buf->start, buf->len); in btrfs_free_tree_block()
3348 btrfs_free_reserved_bytes(cache, buf->len, 0); in btrfs_free_tree_block()
3350 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); in btrfs_free_tree_block()
3358 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); in btrfs_free_tree_block()
3362 /* Can return -ENOMEM */
3365 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_free_extent()
3375 if ((ref->type == BTRFS_REF_METADATA && in btrfs_free_extent()
3376 ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) || in btrfs_free_extent()
3377 (ref->type == BTRFS_REF_DATA && in btrfs_free_extent()
3378 ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) { in btrfs_free_extent()
3380 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); in btrfs_free_extent()
3382 } else if (ref->type == BTRFS_REF_METADATA) { in btrfs_free_extent()
3388 if (!((ref->type == BTRFS_REF_METADATA && in btrfs_free_extent()
3389 ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) || in btrfs_free_extent()
3390 (ref->type == BTRFS_REF_DATA && in btrfs_free_extent()
3391 ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID))) in btrfs_free_extent()
3409 down_read(&cache->data_rwsem); in btrfs_lock_block_group()
3417 down_read(&cache->data_rwsem); in btrfs_grab_block_group()
3424 __acquires(&cluster->refill_lock) in btrfs_lock_cluster()
3428 spin_lock(&cluster->refill_lock); in btrfs_lock_cluster()
3430 used_bg = cluster->block_group; in btrfs_lock_cluster()
3442 if (down_read_trylock(&used_bg->data_rwsem)) in btrfs_lock_cluster()
3445 spin_unlock(&cluster->refill_lock); in btrfs_lock_cluster()
3447 /* We should only have one-level nested. */ in btrfs_lock_cluster()
3448 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); in btrfs_lock_cluster()
3450 spin_lock(&cluster->refill_lock); in btrfs_lock_cluster()
3451 if (used_bg == cluster->block_group) in btrfs_lock_cluster()
3454 up_read(&used_bg->data_rwsem); in btrfs_lock_cluster()
3464 up_read(&cache->data_rwsem); in btrfs_release_block_group()
3495 /* Allocation is called for tree-log */
3507 * Whether we're refilling a cluster, if true we need to re-search
3513 * Whether we're updating free space cache, if true we need to re-search
3541 * Return -ENOENT to inform caller that we need fallback to unclustered mode.
3542 * Return -EAGAIN to inform caller that we need to re-search this block group
3544 * Return 0 means we have found a location and set ffe_ctl->found_offset.
3551 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in find_free_extent_clustered()
3556 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc); in find_free_extent_clustered()
3559 if (cluster_bg != bg && (cluster_bg->ro || in find_free_extent_clustered()
3560 !block_group_bits(cluster_bg, ffe_ctl->flags))) in find_free_extent_clustered()
3564 ffe_ctl->num_bytes, cluster_bg->start, in find_free_extent_clustered()
3565 &ffe_ctl->max_extent_size); in find_free_extent_clustered()
3568 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3570 ffe_ctl->search_start, ffe_ctl->num_bytes); in find_free_extent_clustered()
3572 ffe_ctl->found_offset = offset; in find_free_extent_clustered()
3575 WARN_ON(last_ptr->block_group != cluster_bg); in find_free_extent_clustered()
3589 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { in find_free_extent_clustered()
3590 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3591 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); in find_free_extent_clustered()
3592 return -ENOENT; in find_free_extent_clustered()
3599 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); in find_free_extent_clustered()
3602 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { in find_free_extent_clustered()
3603 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3604 return -ENOENT; in find_free_extent_clustered()
3608 ffe_ctl->empty_cluster + ffe_ctl->empty_size, in find_free_extent_clustered()
3609 bg->full_stripe_len); in find_free_extent_clustered()
3610 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, in find_free_extent_clustered()
3611 ffe_ctl->num_bytes, aligned_cluster); in find_free_extent_clustered()
3615 ffe_ctl->num_bytes, ffe_ctl->search_start, in find_free_extent_clustered()
3616 &ffe_ctl->max_extent_size); in find_free_extent_clustered()
3619 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3621 ffe_ctl->search_start, in find_free_extent_clustered()
3622 ffe_ctl->num_bytes); in find_free_extent_clustered()
3623 ffe_ctl->found_offset = offset; in find_free_extent_clustered()
3626 } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && in find_free_extent_clustered()
3627 !ffe_ctl->retry_clustered) { in find_free_extent_clustered()
3628 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3630 ffe_ctl->retry_clustered = true; in find_free_extent_clustered()
3631 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + in find_free_extent_clustered()
3632 ffe_ctl->empty_cluster + ffe_ctl->empty_size); in find_free_extent_clustered()
3633 return -EAGAIN; in find_free_extent_clustered()
3641 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3647 * Return 0 when we found an free extent and set ffe_ctrl->found_offset
3648 * Return -EAGAIN to inform caller that we need to re-search this block group
3653 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in find_free_extent_unclustered()
3662 spin_lock(&last_ptr->lock); in find_free_extent_unclustered()
3663 last_ptr->fragmented = 1; in find_free_extent_unclustered()
3664 spin_unlock(&last_ptr->lock); in find_free_extent_unclustered()
3666 if (ffe_ctl->cached) { in find_free_extent_unclustered()
3669 free_space_ctl = bg->free_space_ctl; in find_free_extent_unclustered()
3670 spin_lock(&free_space_ctl->tree_lock); in find_free_extent_unclustered()
3671 if (free_space_ctl->free_space < in find_free_extent_unclustered()
3672 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + in find_free_extent_unclustered()
3673 ffe_ctl->empty_size) { in find_free_extent_unclustered()
3674 ffe_ctl->total_free_space = max_t(u64, in find_free_extent_unclustered()
3675 ffe_ctl->total_free_space, in find_free_extent_unclustered()
3676 free_space_ctl->free_space); in find_free_extent_unclustered()
3677 spin_unlock(&free_space_ctl->tree_lock); in find_free_extent_unclustered()
3680 spin_unlock(&free_space_ctl->tree_lock); in find_free_extent_unclustered()
3683 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, in find_free_extent_unclustered()
3684 ffe_ctl->num_bytes, ffe_ctl->empty_size, in find_free_extent_unclustered()
3685 &ffe_ctl->max_extent_size); in find_free_extent_unclustered()
3696 if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached && in find_free_extent_unclustered()
3697 ffe_ctl->loop > LOOP_CACHING_NOWAIT) { in find_free_extent_unclustered()
3698 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + in find_free_extent_unclustered()
3699 ffe_ctl->empty_size); in find_free_extent_unclustered()
3700 ffe_ctl->retry_unclustered = true; in find_free_extent_unclustered()
3701 return -EAGAIN; in find_free_extent_unclustered()
3705 ffe_ctl->found_offset = offset; in find_free_extent_unclustered()
3716 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { in do_allocation_clustered()
3718 if (ret >= 0 || ret == -EAGAIN) in do_allocation_clustered()
3720 /* ret == -ENOENT case falls through */ in do_allocation_clustered()
3727 * Tree-log block group locking
3732 * for tree-log metadata.
3743 * Simple allocator for sequential-only block group. It only allows sequential
3744 * allocation. No need to play with trees. This function also reserves the
3751 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_allocation_zoned()
3752 struct btrfs_space_info *space_info = block_group->space_info; in do_allocation_zoned()
3753 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_allocation_zoned()
3754 u64 start = block_group->start; in do_allocation_zoned()
3755 u64 num_bytes = ffe_ctl->num_bytes; in do_allocation_zoned()
3757 u64 bytenr = block_group->start; in do_allocation_zoned()
3762 ASSERT(btrfs_is_zoned(block_group->fs_info)); in do_allocation_zoned()
3765 * Do not allow non-tree-log blocks in the dedicated tree-log block in do_allocation_zoned()
3768 spin_lock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3769 log_bytenr = fs_info->treelog_bg; in do_allocation_zoned()
3770 skip = log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || in do_allocation_zoned()
3771 (!ffe_ctl->for_treelog && bytenr == log_bytenr)); in do_allocation_zoned()
3772 spin_unlock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3776 spin_lock(&space_info->lock); in do_allocation_zoned()
3777 spin_lock(&block_group->lock); in do_allocation_zoned()
3778 spin_lock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3780 ASSERT(!ffe_ctl->for_treelog || in do_allocation_zoned()
3781 block_group->start == fs_info->treelog_bg || in do_allocation_zoned()
3782 fs_info->treelog_bg == 0); in do_allocation_zoned()
3784 if (block_group->ro) { in do_allocation_zoned()
3790 * Do not allow currently using block group to be tree-log dedicated in do_allocation_zoned()
3793 if (ffe_ctl->for_treelog && !fs_info->treelog_bg && in do_allocation_zoned()
3794 (block_group->used || block_group->reserved)) { in do_allocation_zoned()
3799 avail = block_group->length - block_group->alloc_offset; in do_allocation_zoned()
3801 if (ffe_ctl->max_extent_size < avail) { in do_allocation_zoned()
3806 ffe_ctl->max_extent_size = avail; in do_allocation_zoned()
3807 ffe_ctl->total_free_space = avail; in do_allocation_zoned()
3813 if (ffe_ctl->for_treelog && !fs_info->treelog_bg) in do_allocation_zoned()
3814 fs_info->treelog_bg = block_group->start; in do_allocation_zoned()
3816 ffe_ctl->found_offset = start + block_group->alloc_offset; in do_allocation_zoned()
3817 block_group->alloc_offset += num_bytes; in do_allocation_zoned()
3818 spin_lock(&ctl->tree_lock); in do_allocation_zoned()
3819 ctl->free_space -= num_bytes; in do_allocation_zoned()
3820 spin_unlock(&ctl->tree_lock); in do_allocation_zoned()
3827 ffe_ctl->search_start = ffe_ctl->found_offset; in do_allocation_zoned()
3830 if (ret && ffe_ctl->for_treelog) in do_allocation_zoned()
3831 fs_info->treelog_bg = 0; in do_allocation_zoned()
3832 spin_unlock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3833 spin_unlock(&block_group->lock); in do_allocation_zoned()
3834 spin_unlock(&space_info->lock); in do_allocation_zoned()
3842 switch (ffe_ctl->policy) { in do_allocation()
3856 switch (ffe_ctl->policy) { in release_block_group()
3858 ffe_ctl->retry_clustered = false; in release_block_group()
3859 ffe_ctl->retry_unclustered = false; in release_block_group()
3868 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != in release_block_group()
3869 ffe_ctl->index); in release_block_group()
3876 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in found_extent_clustered()
3878 if (!ffe_ctl->use_cluster && last_ptr) { in found_extent_clustered()
3879 spin_lock(&last_ptr->lock); in found_extent_clustered()
3880 last_ptr->window_start = ins->objectid; in found_extent_clustered()
3881 spin_unlock(&last_ptr->lock); in found_extent_clustered()
3888 switch (ffe_ctl->policy) { in found_extent()
3902 switch (ffe_ctl->policy) { in chunk_allocation_failed()
3908 ffe_ctl->loop = LOOP_NO_EMPTY_SIZE; in chunk_allocation_failed()
3912 return -ENOSPC; in chunk_allocation_failed()
3919 * Return >0 means caller needs to re-search for free extent
3928 struct btrfs_root *root = fs_info->extent_root; in find_free_extent_update_loop()
3931 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && in find_free_extent_update_loop()
3932 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) in find_free_extent_update_loop()
3933 ffe_ctl->orig_have_caching_bg = true; in find_free_extent_update_loop()
3935 if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT && in find_free_extent_update_loop()
3936 ffe_ctl->have_caching_bg) in find_free_extent_update_loop()
3939 if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES) in find_free_extent_update_loop()
3942 if (ins->objectid) { in find_free_extent_update_loop()
3955 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { in find_free_extent_update_loop()
3956 ffe_ctl->index = 0; in find_free_extent_update_loop()
3957 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) { in find_free_extent_update_loop()
3963 if (ffe_ctl->orig_have_caching_bg || !full_search) in find_free_extent_update_loop()
3964 ffe_ctl->loop = LOOP_CACHING_WAIT; in find_free_extent_update_loop()
3966 ffe_ctl->loop = LOOP_ALLOC_CHUNK; in find_free_extent_update_loop()
3968 ffe_ctl->loop++; in find_free_extent_update_loop()
3971 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { in find_free_extent_update_loop()
3975 trans = current->journal_info; in find_free_extent_update_loop()
3986 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, in find_free_extent_update_loop()
3990 if (ret == -ENOSPC) in find_free_extent_update_loop()
4002 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { in find_free_extent_update_loop()
4003 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) in find_free_extent_update_loop()
4004 return -ENOSPC; in find_free_extent_update_loop()
4007 * Don't loop again if we already have no empty_size and in find_free_extent_update_loop()
4008 * no empty_cluster. in find_free_extent_update_loop()
4010 if (ffe_ctl->empty_size == 0 && in find_free_extent_update_loop()
4011 ffe_ctl->empty_cluster == 0) in find_free_extent_update_loop()
4012 return -ENOSPC; in find_free_extent_update_loop()
4013 ffe_ctl->empty_size = 0; in find_free_extent_update_loop()
4014 ffe_ctl->empty_cluster = 0; in find_free_extent_update_loop()
4018 return -ENOSPC; in find_free_extent_update_loop()
4036 if (space_info->max_extent_size) { in prepare_allocation_clustered()
4037 spin_lock(&space_info->lock); in prepare_allocation_clustered()
4038 if (space_info->max_extent_size && in prepare_allocation_clustered()
4039 ffe_ctl->num_bytes > space_info->max_extent_size) { in prepare_allocation_clustered()
4040 ins->offset = space_info->max_extent_size; in prepare_allocation_clustered()
4041 spin_unlock(&space_info->lock); in prepare_allocation_clustered()
4042 return -ENOSPC; in prepare_allocation_clustered()
4043 } else if (space_info->max_extent_size) { in prepare_allocation_clustered()
4044 ffe_ctl->use_cluster = false; in prepare_allocation_clustered()
4046 spin_unlock(&space_info->lock); in prepare_allocation_clustered()
4049 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, in prepare_allocation_clustered()
4050 &ffe_ctl->empty_cluster); in prepare_allocation_clustered()
4051 if (ffe_ctl->last_ptr) { in prepare_allocation_clustered()
4052 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in prepare_allocation_clustered()
4054 spin_lock(&last_ptr->lock); in prepare_allocation_clustered()
4055 if (last_ptr->block_group) in prepare_allocation_clustered()
4056 ffe_ctl->hint_byte = last_ptr->window_start; in prepare_allocation_clustered()
4057 if (last_ptr->fragmented) { in prepare_allocation_clustered()
4063 ffe_ctl->hint_byte = last_ptr->window_start; in prepare_allocation_clustered()
4064 ffe_ctl->use_cluster = false; in prepare_allocation_clustered()
4066 spin_unlock(&last_ptr->lock); in prepare_allocation_clustered()
4077 switch (ffe_ctl->policy) { in prepare_allocation()
4082 if (ffe_ctl->for_treelog) { in prepare_allocation()
4083 spin_lock(&fs_info->treelog_bg_lock); in prepare_allocation()
4084 if (fs_info->treelog_bg) in prepare_allocation()
4085 ffe_ctl->hint_byte = fs_info->treelog_bg; in prepare_allocation()
4086 spin_unlock(&fs_info->treelog_bg_lock); in prepare_allocation()
4097 * ins->objectid == start position
4098 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4099 * ins->offset == the size of the hole.
4102 * If there is no suitable free space, we will record the max size of
4108 * |- Iterate through all block groups
4109 * | |- Get a valid block group
4110 * | |- Try to do clustered allocation in that block group
4111 * | |- Try to do unclustered allocation in that block group
4112 * | |- Check if the result is valid
4113 * | | |- If valid, then exit
4114 * | |- Jump to next block group
4116 * |- Push harder to find free extents
4117 * |- If not found, re-iterate all block groups
4124 struct btrfs_fs_info *fs_info = root->fs_info; in find_free_extent()
4131 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); in find_free_extent()
4133 WARN_ON(num_bytes < fs_info->sectorsize); in find_free_extent()
4157 ins->type = BTRFS_EXTENT_ITEM_KEY; in find_free_extent()
4158 ins->objectid = 0; in find_free_extent()
4159 ins->offset = 0; in find_free_extent()
4165 btrfs_err(fs_info, "No space info for %llu", flags); in find_free_extent()
4166 return -ENOSPC; in find_free_extent()
4183 * However if we are re-searching with an ideal block group in find_free_extent()
4187 block_group->cached != BTRFS_CACHE_NO) { in find_free_extent()
4188 down_read(&space_info->groups_sem); in find_free_extent()
4189 if (list_empty(&block_group->list) || in find_free_extent()
4190 block_group->ro) { in find_free_extent()
4198 up_read(&space_info->groups_sem); in find_free_extent()
4201 block_group->flags); in find_free_extent()
4214 down_read(&space_info->groups_sem); in find_free_extent()
4216 &space_info->block_groups[ffe_ctl.index], list) { in find_free_extent()
4219 /* If the block group is read-only, we can skip it entirely. */ in find_free_extent()
4220 if (unlikely(block_group->ro)) { in find_free_extent()
4227 ffe_ctl.search_start = block_group->start; in find_free_extent()
4245 if ((flags & extra) && !(block_group->flags & extra)) in find_free_extent()
4250 * It's possible that we have MIXED_GROUP flag but no in find_free_extent()
4279 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) in find_free_extent()
4289 } else if (ret == -EAGAIN) { in find_free_extent()
4297 fs_info->stripesize); in find_free_extent()
4301 block_group->start + block_group->length) { in find_free_extent()
4310 ffe_ctl.search_start - ffe_ctl.found_offset); in find_free_extent()
4314 if (ret == -EAGAIN) { in find_free_extent()
4322 ins->objectid = ffe_ctl.search_start; in find_free_extent()
4323 ins->offset = num_bytes; in find_free_extent()
4333 up_read(&space_info->groups_sem); in find_free_extent()
4339 if (ret == -ENOSPC && !cache_block_group_error) { in find_free_extent()
4341 * Use ffe_ctl->total_free_space as fallback if we can't find in find_free_extent()
4346 spin_lock(&space_info->lock); in find_free_extent()
4347 space_info->max_extent_size = ffe_ctl.max_extent_size; in find_free_extent()
4348 spin_unlock(&space_info->lock); in find_free_extent()
4349 ins->offset = ffe_ctl.max_extent_size; in find_free_extent()
4350 } else if (ret == -ENOSPC) { in find_free_extent()
4357 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
4360 * @root - The root that will contain this extent
4362 * @ram_bytes - The amount of space in ram that @num_bytes take. This
4366 * @num_bytes - Number of bytes to allocate on-disk.
4368 * @min_alloc_size - Indicates the minimum amount of space that the
4375 * @empty_size - A hint that you plan on doing more COW. This is the
4380 * @hint_byte - Hint to the allocator to start searching above the byte
4383 * @ins - This key is modified to record the found hole. It will
4385 * ins->objectid == start position
4386 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4387 * ins->offset == the size of the hole.
4389 * @is_data - Boolean flag indicating whether an extent is
4392 * @delalloc - Boolean flag indicating whether this allocation is for
4398 * case -ENOSPC is returned then @ins->offset will contain the size of the
4406 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_reserve_extent()
4410 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); in btrfs_reserve_extent()
4414 WARN_ON(num_bytes < fs_info->sectorsize); in btrfs_reserve_extent()
4418 btrfs_dec_block_group_reservations(fs_info, ins->objectid); in btrfs_reserve_extent()
4419 } else if (ret == -ENOSPC) { in btrfs_reserve_extent()
4420 if (!final_tried && ins->offset) { in btrfs_reserve_extent()
4421 num_bytes = min(num_bytes >> 1, ins->offset); in btrfs_reserve_extent()
4423 fs_info->sectorsize); in btrfs_reserve_extent()
4434 "allocation failed flags %llu, wanted %llu tree-log %d", in btrfs_reserve_extent()
4454 return -ENOSPC; in btrfs_free_reserved_extent()
4471 cache = btrfs_lookup_block_group(trans->fs_info, start); in btrfs_pin_reserved_extent()
4473 btrfs_err(trans->fs_info, "unable to find block group for %llu", in btrfs_pin_reserved_extent()
4475 return -ENOSPC; in btrfs_pin_reserved_extent()
4488 struct btrfs_fs_info *fs_info = trans->fs_info; in alloc_reserved_file_extent()
4506 return -ENOMEM; in alloc_reserved_file_extent()
4508 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, in alloc_reserved_file_extent()
4515 leaf = path->nodes[0]; in alloc_reserved_file_extent()
4516 extent_item = btrfs_item_ptr(leaf, path->slots[0], in alloc_reserved_file_extent()
4519 btrfs_set_extent_generation(leaf, extent_item, trans->transid); in alloc_reserved_file_extent()
4532 ref = (struct btrfs_extent_data_ref *)(&iref->offset); in alloc_reserved_file_extent()
4539 btrfs_mark_buffer_dirty(path->nodes[0]); in alloc_reserved_file_extent()
4542 ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset); in alloc_reserved_file_extent()
4546 ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, 1); in alloc_reserved_file_extent()
4547 if (ret) { /* -ENOENT, logic error */ in alloc_reserved_file_extent()
4549 ins->objectid, ins->offset); in alloc_reserved_file_extent()
4552 trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset); in alloc_reserved_file_extent()
4560 struct btrfs_fs_info *fs_info = trans->fs_info; in alloc_reserved_tree_block()
4571 u64 flags = extent_op->flags_to_set; in alloc_reserved_tree_block()
4576 extent_key.objectid = node->bytenr; in alloc_reserved_tree_block()
4578 extent_key.offset = ref->level; in alloc_reserved_tree_block()
4580 num_bytes = fs_info->nodesize; in alloc_reserved_tree_block()
4582 extent_key.offset = node->num_bytes; in alloc_reserved_tree_block()
4585 num_bytes = node->num_bytes; in alloc_reserved_tree_block()
4590 return -ENOMEM; in alloc_reserved_tree_block()
4592 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, in alloc_reserved_tree_block()
4599 leaf = path->nodes[0]; in alloc_reserved_tree_block()
4600 extent_item = btrfs_item_ptr(leaf, path->slots[0], in alloc_reserved_tree_block()
4603 btrfs_set_extent_generation(leaf, extent_item, trans->transid); in alloc_reserved_tree_block()
4611 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); in alloc_reserved_tree_block()
4612 btrfs_set_tree_block_level(leaf, block_info, ref->level); in alloc_reserved_tree_block()
4616 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { in alloc_reserved_tree_block()
4619 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); in alloc_reserved_tree_block()
4623 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); in alloc_reserved_tree_block()
4635 fs_info->nodesize, 1); in alloc_reserved_tree_block()
4636 if (ret) { /* -ENOENT, logic error */ in alloc_reserved_tree_block()
4643 fs_info->nodesize); in alloc_reserved_tree_block()
4654 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); in btrfs_alloc_reserved_file_extent()
4657 ins->objectid, ins->offset, 0); in btrfs_alloc_reserved_file_extent()
4658 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset); in btrfs_alloc_reserved_file_extent()
4659 btrfs_ref_tree_mod(root->fs_info, &generic_ref); in btrfs_alloc_reserved_file_extent()
4673 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_alloc_logged_file_extent()
4683 ret = __exclude_logged_extent(fs_info, ins->objectid, in btrfs_alloc_logged_file_extent()
4684 ins->offset); in btrfs_alloc_logged_file_extent()
4689 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); in btrfs_alloc_logged_file_extent()
4691 return -EINVAL; in btrfs_alloc_logged_file_extent()
4693 space_info = block_group->space_info; in btrfs_alloc_logged_file_extent()
4694 spin_lock(&space_info->lock); in btrfs_alloc_logged_file_extent()
4695 spin_lock(&block_group->lock); in btrfs_alloc_logged_file_extent()
4696 space_info->bytes_reserved += ins->offset; in btrfs_alloc_logged_file_extent()
4697 block_group->reserved += ins->offset; in btrfs_alloc_logged_file_extent()
4698 spin_unlock(&block_group->lock); in btrfs_alloc_logged_file_extent()
4699 spin_unlock(&space_info->lock); in btrfs_alloc_logged_file_extent()
4704 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); in btrfs_alloc_logged_file_extent()
4714 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_init_new_buffer()
4726 if (buf->lock_owner == current->pid) { in btrfs_init_new_buffer()
4729 buf->start, btrfs_header_owner(buf), current->pid); in btrfs_init_new_buffer()
4731 return ERR_PTR(-EUCLEAN); in btrfs_init_new_buffer()
4742 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); in btrfs_init_new_buffer()
4743 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); in btrfs_init_new_buffer()
4749 btrfs_set_header_bytenr(buf, buf->start); in btrfs_init_new_buffer()
4750 btrfs_set_header_generation(buf, trans->transid); in btrfs_init_new_buffer()
4753 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); in btrfs_init_new_buffer()
4754 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); in btrfs_init_new_buffer()
4755 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { in btrfs_init_new_buffer()
4756 buf->log_index = root->log_transid % 2; in btrfs_init_new_buffer()
4761 if (buf->log_index == 0) in btrfs_init_new_buffer()
4762 set_extent_dirty(&root->dirty_log_pages, buf->start, in btrfs_init_new_buffer()
4763 buf->start + buf->len - 1, GFP_NOFS); in btrfs_init_new_buffer()
4765 set_extent_new(&root->dirty_log_pages, buf->start, in btrfs_init_new_buffer()
4766 buf->start + buf->len - 1); in btrfs_init_new_buffer()
4768 buf->log_index = -1; in btrfs_init_new_buffer()
4769 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, in btrfs_init_new_buffer()
4770 buf->start + buf->len - 1, GFP_NOFS); in btrfs_init_new_buffer()
4788 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_alloc_tree_block()
4796 u32 blocksize = fs_info->nodesize; in btrfs_alloc_tree_block()
4801 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, in btrfs_alloc_tree_block()
4804 root->alloc_bytenr += blocksize; in btrfs_alloc_tree_block()
4835 ret = -ENOMEM; in btrfs_alloc_tree_block()
4839 memcpy(&extent_op->key, key, sizeof(extent_op->key)); in btrfs_alloc_tree_block()
4841 memset(&extent_op->key, 0, sizeof(extent_op->key)); in btrfs_alloc_tree_block()
4842 extent_op->flags_to_set = flags; in btrfs_alloc_tree_block()
4843 extent_op->update_key = skinny_metadata ? false : true; in btrfs_alloc_tree_block()
4844 extent_op->update_flags = true; in btrfs_alloc_tree_block()
4845 extent_op->is_data = false; in btrfs_alloc_tree_block()
4846 extent_op->level = level; in btrfs_alloc_tree_block()
4850 generic_ref.real_root = root->root_key.objectid; in btrfs_alloc_tree_block()
4892 struct walk_control *wc, in reada_walk_down() argument
4895 struct btrfs_fs_info *fs_info = root->fs_info; in reada_walk_down()
4907 if (path->slots[wc->level] < wc->reada_slot) { in reada_walk_down()
4908 wc->reada_count = wc->reada_count * 2 / 3; in reada_walk_down()
4909 wc->reada_count = max(wc->reada_count, 2); in reada_walk_down()
4911 wc->reada_count = wc->reada_count * 3 / 2; in reada_walk_down()
4912 wc->reada_count = min_t(int, wc->reada_count, in reada_walk_down()
4916 eb = path->nodes[wc->level]; in reada_walk_down()
4919 for (slot = path->slots[wc->level]; slot < nritems; slot++) { in reada_walk_down()
4920 if (nread >= wc->reada_count) in reada_walk_down()
4927 if (slot == path->slots[wc->level]) in reada_walk_down()
4930 if (wc->stage == UPDATE_BACKREF && in reada_walk_down()
4931 generation <= root->root_key.offset) in reada_walk_down()
4936 wc->level - 1, 1, &refs, in reada_walk_down()
4943 if (wc->stage == DROP_REFERENCE) { in reada_walk_down()
4947 if (wc->level == 1 && in reada_walk_down()
4950 if (!wc->update_ref || in reada_walk_down()
4951 generation <= root->root_key.offset) in reada_walk_down()
4955 &wc->update_progress); in reada_walk_down()
4959 if (wc->level == 1 && in reada_walk_down()
4967 wc->reada_slot = slot; in reada_walk_down()
4973 * when wc->stage == UPDATE_BACKREF, this function updates
4981 struct walk_control *wc, int lookup_info) in walk_down_proc() argument
4983 struct btrfs_fs_info *fs_info = root->fs_info; in walk_down_proc()
4984 int level = wc->level; in walk_down_proc()
4985 struct extent_buffer *eb = path->nodes[level]; in walk_down_proc()
4989 if (wc->stage == UPDATE_BACKREF && in walk_down_proc()
4990 btrfs_header_owner(eb) != root->root_key.objectid) in walk_down_proc()
4998 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || in walk_down_proc()
4999 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { in walk_down_proc()
5000 BUG_ON(!path->locks[level]); in walk_down_proc()
5002 eb->start, level, 1, in walk_down_proc()
5003 &wc->refs[level], in walk_down_proc()
5004 &wc->flags[level]); in walk_down_proc()
5005 BUG_ON(ret == -ENOMEM); in walk_down_proc()
5008 BUG_ON(wc->refs[level] == 0); in walk_down_proc()
5011 if (wc->stage == DROP_REFERENCE) { in walk_down_proc()
5012 if (wc->refs[level] > 1) in walk_down_proc()
5015 if (path->locks[level] && !wc->keep_locks) { in walk_down_proc()
5016 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_down_proc()
5017 path->locks[level] = 0; in walk_down_proc()
5022 /* wc->stage == UPDATE_BACKREF */ in walk_down_proc()
5023 if (!(wc->flags[level] & flag)) { in walk_down_proc()
5024 BUG_ON(!path->locks[level]); in walk_down_proc()
5026 BUG_ON(ret); /* -ENOMEM */ in walk_down_proc()
5028 BUG_ON(ret); /* -ENOMEM */ in walk_down_proc()
5031 BUG_ON(ret); /* -ENOMEM */ in walk_down_proc()
5032 wc->flags[level] |= flag; in walk_down_proc()
5039 if (path->locks[level] && level > 0) { in walk_down_proc()
5040 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_down_proc()
5041 path->locks[level] = 0; in walk_down_proc()
5060 return -ENOMEM; in check_ref_exists()
5063 root->fs_info->nodesize, parent, in check_ref_exists()
5064 root->root_key.objectid, level, 0); in check_ref_exists()
5066 if (ret == -ENOENT) in check_ref_exists()
5076 * when wc->stage == DROP_REFERENCE, this function checks
5079 * rooted at the block, this function changes wc->stage to
5080 * UPDATE_BACKREF. if the block is shared and there is no
5089 struct walk_control *wc, int *lookup_info) in do_walk_down() argument
5091 struct btrfs_fs_info *fs_info = root->fs_info; in do_walk_down()
5099 int level = wc->level; in do_walk_down()
5104 generation = btrfs_node_ptr_generation(path->nodes[level], in do_walk_down()
5105 path->slots[level]); in do_walk_down()
5108 * was created, we know there is no need to update back refs in do_walk_down()
5111 if (wc->stage == UPDATE_BACKREF && in do_walk_down()
5112 generation <= root->root_key.offset) { in do_walk_down()
5117 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); in do_walk_down()
5118 btrfs_node_key_to_cpu(path->nodes[level], &first_key, in do_walk_down()
5119 path->slots[level]); in do_walk_down()
5124 root->root_key.objectid, level - 1); in do_walk_down()
5131 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, in do_walk_down()
5132 &wc->refs[level - 1], in do_walk_down()
5133 &wc->flags[level - 1]); in do_walk_down()
5137 if (unlikely(wc->refs[level - 1] == 0)) { in do_walk_down()
5139 ret = -EIO; in do_walk_down()
5144 if (wc->stage == DROP_REFERENCE) { in do_walk_down()
5145 if (wc->refs[level - 1] > 1) { in do_walk_down()
5148 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) in do_walk_down()
5151 if (!wc->update_ref || in do_walk_down()
5152 generation <= root->root_key.offset) in do_walk_down()
5155 btrfs_node_key_to_cpu(path->nodes[level], &key, in do_walk_down()
5156 path->slots[level]); in do_walk_down()
5157 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); in do_walk_down()
5161 wc->stage = UPDATE_BACKREF; in do_walk_down()
5162 wc->shared_level = level - 1; in do_walk_down()
5166 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) in do_walk_down()
5179 reada_walk_down(trans, root, wc, path); in do_walk_down()
5180 next = read_tree_block(fs_info, bytenr, root->root_key.objectid, in do_walk_down()
5181 generation, level - 1, &first_key); in do_walk_down()
5186 return -EIO; in do_walk_down()
5191 level--; in do_walk_down()
5194 btrfs_err(root->fs_info, "mismatched level"); in do_walk_down()
5195 ret = -EIO; in do_walk_down()
5198 path->nodes[level] = next; in do_walk_down()
5199 path->slots[level] = 0; in do_walk_down()
5200 path->locks[level] = BTRFS_WRITE_LOCK; in do_walk_down()
5201 wc->level = level; in do_walk_down()
5202 if (wc->level == 1) in do_walk_down()
5203 wc->reada_slot = 0; in do_walk_down()
5206 wc->refs[level - 1] = 0; in do_walk_down()
5207 wc->flags[level - 1] = 0; in do_walk_down()
5208 if (wc->stage == DROP_REFERENCE) { in do_walk_down()
5209 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { in do_walk_down()
5210 parent = path->nodes[level]->start; in do_walk_down()
5212 ASSERT(root->root_key.objectid == in do_walk_down()
5213 btrfs_header_owner(path->nodes[level])); in do_walk_down()
5214 if (root->root_key.objectid != in do_walk_down()
5215 btrfs_header_owner(path->nodes[level])) { in do_walk_down()
5216 btrfs_err(root->fs_info, in do_walk_down()
5218 ret = -EIO; in do_walk_down()
5228 * ->restarted flag. in do_walk_down()
5230 if (wc->restarted) { in do_walk_down()
5232 level - 1); in do_walk_down()
5238 wc->restarted = 0; in do_walk_down()
5246 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && in do_walk_down()
5249 generation, level - 1); in do_walk_down()
5263 wc->drop_level = level; in do_walk_down()
5264 find_next_key(path, level, &wc->drop_progress); in do_walk_down()
5267 fs_info->nodesize, parent); in do_walk_down()
5268 btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid); in do_walk_down()
5287 * when wc->stage == DROP_REFERENCE, this function drops
5290 * when wc->stage == UPDATE_BACKREF, this function changes
5291 * wc->stage back to DROP_REFERENCE if we changed wc->stage
5299 struct walk_control *wc) in walk_up_proc() argument
5301 struct btrfs_fs_info *fs_info = root->fs_info; in walk_up_proc()
5303 int level = wc->level; in walk_up_proc()
5304 struct extent_buffer *eb = path->nodes[level]; in walk_up_proc()
5307 if (wc->stage == UPDATE_BACKREF) { in walk_up_proc()
5308 BUG_ON(wc->shared_level < level); in walk_up_proc()
5309 if (level < wc->shared_level) in walk_up_proc()
5312 ret = find_next_key(path, level + 1, &wc->update_progress); in walk_up_proc()
5314 wc->update_ref = 0; in walk_up_proc()
5316 wc->stage = DROP_REFERENCE; in walk_up_proc()
5317 wc->shared_level = -1; in walk_up_proc()
5318 path->slots[level] = 0; in walk_up_proc()
5325 if (!path->locks[level]) { in walk_up_proc()
5328 path->locks[level] = BTRFS_WRITE_LOCK; in walk_up_proc()
5331 eb->start, level, 1, in walk_up_proc()
5332 &wc->refs[level], in walk_up_proc()
5333 &wc->flags[level]); in walk_up_proc()
5335 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_up_proc()
5336 path->locks[level] = 0; in walk_up_proc()
5339 BUG_ON(wc->refs[level] == 0); in walk_up_proc()
5340 if (wc->refs[level] == 1) { in walk_up_proc()
5341 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_up_proc()
5342 path->locks[level] = 0; in walk_up_proc()
5348 /* wc->stage == DROP_REFERENCE */ in walk_up_proc()
5349 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); in walk_up_proc()
5351 if (wc->refs[level] == 1) { in walk_up_proc()
5353 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) in walk_up_proc()
5357 BUG_ON(ret); /* -ENOMEM */ in walk_up_proc()
5358 if (is_fstree(root->root_key.objectid)) { in walk_up_proc()
5368 if (!path->locks[level] && in walk_up_proc()
5369 btrfs_header_generation(eb) == trans->transid) { in walk_up_proc()
5371 path->locks[level] = BTRFS_WRITE_LOCK; in walk_up_proc()
5376 if (eb == root->node) { in walk_up_proc()
5377 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) in walk_up_proc()
5378 parent = eb->start; in walk_up_proc()
5379 else if (root->root_key.objectid != btrfs_header_owner(eb)) in walk_up_proc()
5382 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) in walk_up_proc()
5383 parent = path->nodes[level + 1]->start; in walk_up_proc()
5384 else if (root->root_key.objectid != in walk_up_proc()
5385 btrfs_header_owner(path->nodes[level + 1])) in walk_up_proc()
5389 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); in walk_up_proc()
5391 wc->refs[level] = 0; in walk_up_proc()
5392 wc->flags[level] = 0; in walk_up_proc()
5397 btrfs_header_owner(eb), root->root_key.objectid); in walk_up_proc()
5398 return -EUCLEAN; in walk_up_proc()
5404 struct walk_control *wc) in walk_down_tree() argument
5406 int level = wc->level; in walk_down_tree()
5411 ret = walk_down_proc(trans, root, path, wc, lookup_info); in walk_down_tree()
5418 if (path->slots[level] >= in walk_down_tree()
5419 btrfs_header_nritems(path->nodes[level])) in walk_down_tree()
5422 ret = do_walk_down(trans, root, path, wc, &lookup_info); in walk_down_tree()
5424 path->slots[level]++; in walk_down_tree()
5428 level = wc->level; in walk_down_tree()
5436 struct walk_control *wc, int max_level) in walk_up_tree() argument
5438 int level = wc->level; in walk_up_tree()
5441 path->slots[level] = btrfs_header_nritems(path->nodes[level]); in walk_up_tree()
5442 while (level < max_level && path->nodes[level]) { in walk_up_tree()
5443 wc->level = level; in walk_up_tree()
5444 if (path->slots[level] + 1 < in walk_up_tree()
5445 btrfs_header_nritems(path->nodes[level])) { in walk_up_tree()
5446 path->slots[level]++; in walk_up_tree()
5449 ret = walk_up_proc(trans, root, path, wc); in walk_up_tree()
5455 if (path->locks[level]) { in walk_up_tree()
5456 btrfs_tree_unlock_rw(path->nodes[level], in walk_up_tree()
5457 path->locks[level]); in walk_up_tree()
5458 path->locks[level] = 0; in walk_up_tree()
5460 free_extent_buffer(path->nodes[level]); in walk_up_tree()
5461 path->nodes[level] = NULL; in walk_up_tree()
5479 * If called with for_reloc == 0, may exit early with -EAGAIN
5483 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_drop_snapshot()
5486 struct btrfs_root *tree_root = fs_info->tree_root; in btrfs_drop_snapshot()
5487 struct btrfs_root_item *root_item = &root->root_item; in btrfs_drop_snapshot()
5488 struct walk_control *wc; in btrfs_drop_snapshot() local
5495 btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); in btrfs_drop_snapshot()
5499 err = -ENOMEM; in btrfs_drop_snapshot()
5503 wc = kzalloc(sizeof(*wc), GFP_NOFS); in btrfs_drop_snapshot()
5504 if (!wc) { in btrfs_drop_snapshot()
5506 err = -ENOMEM; in btrfs_drop_snapshot()
5535 set_bit(BTRFS_ROOT_DELETING, &root->state); in btrfs_drop_snapshot()
5536 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { in btrfs_drop_snapshot()
5537 level = btrfs_header_level(root->node); in btrfs_drop_snapshot()
5538 path->nodes[level] = btrfs_lock_root_node(root); in btrfs_drop_snapshot()
5539 path->slots[level] = 0; in btrfs_drop_snapshot()
5540 path->locks[level] = BTRFS_WRITE_LOCK; in btrfs_drop_snapshot()
5541 memset(&wc->update_progress, 0, in btrfs_drop_snapshot()
5542 sizeof(wc->update_progress)); in btrfs_drop_snapshot()
5544 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); in btrfs_drop_snapshot()
5545 memcpy(&wc->update_progress, &key, in btrfs_drop_snapshot()
5546 sizeof(wc->update_progress)); in btrfs_drop_snapshot()
5550 path->lowest_level = level; in btrfs_drop_snapshot()
5552 path->lowest_level = 0; in btrfs_drop_snapshot()
5565 level = btrfs_header_level(root->node); in btrfs_drop_snapshot()
5567 btrfs_tree_lock(path->nodes[level]); in btrfs_drop_snapshot()
5568 path->locks[level] = BTRFS_WRITE_LOCK; in btrfs_drop_snapshot()
5571 path->nodes[level]->start, in btrfs_drop_snapshot()
5572 level, 1, &wc->refs[level], in btrfs_drop_snapshot()
5573 &wc->flags[level]); in btrfs_drop_snapshot()
5578 BUG_ON(wc->refs[level] == 0); in btrfs_drop_snapshot()
5583 btrfs_tree_unlock(path->nodes[level]); in btrfs_drop_snapshot()
5584 path->locks[level] = 0; in btrfs_drop_snapshot()
5585 WARN_ON(wc->refs[level] != 1); in btrfs_drop_snapshot()
5586 level--; in btrfs_drop_snapshot()
5590 wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); in btrfs_drop_snapshot()
5591 wc->level = level; in btrfs_drop_snapshot()
5592 wc->shared_level = -1; in btrfs_drop_snapshot()
5593 wc->stage = DROP_REFERENCE; in btrfs_drop_snapshot()
5594 wc->update_ref = update_ref; in btrfs_drop_snapshot()
5595 wc->keep_locks = 0; in btrfs_drop_snapshot()
5596 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); in btrfs_drop_snapshot()
5600 ret = walk_down_tree(trans, root, path, wc); in btrfs_drop_snapshot()
5606 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); in btrfs_drop_snapshot()
5613 BUG_ON(wc->stage != DROP_REFERENCE); in btrfs_drop_snapshot()
5617 if (wc->stage == DROP_REFERENCE) { in btrfs_drop_snapshot()
5618 wc->drop_level = wc->level; in btrfs_drop_snapshot()
5619 btrfs_node_key_to_cpu(path->nodes[wc->drop_level], in btrfs_drop_snapshot()
5620 &wc->drop_progress, in btrfs_drop_snapshot()
5621 path->slots[wc->drop_level]); in btrfs_drop_snapshot()
5623 btrfs_cpu_key_to_disk(&root_item->drop_progress, in btrfs_drop_snapshot()
5624 &wc->drop_progress); in btrfs_drop_snapshot()
5625 btrfs_set_root_drop_level(root_item, wc->drop_level); in btrfs_drop_snapshot()
5627 BUG_ON(wc->level == 0); in btrfs_drop_snapshot()
5631 &root->root_key, in btrfs_drop_snapshot()
5643 err = -EAGAIN; in btrfs_drop_snapshot()
5666 ret = btrfs_del_root(trans, &root->root_key); in btrfs_drop_snapshot()
5673 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { in btrfs_drop_snapshot()
5674 ret = btrfs_find_root(tree_root, &root->root_key, path, in btrfs_drop_snapshot()
5684 * The most common failure here is just -ENOENT. in btrfs_drop_snapshot()
5687 root->root_key.objectid); in btrfs_drop_snapshot()
5699 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) in btrfs_drop_snapshot()
5707 kfree(wc); in btrfs_drop_snapshot()
5715 * or unmount) so we don't leak memory. in btrfs_drop_snapshot()
5733 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_drop_subtree()
5735 struct walk_control *wc; in btrfs_drop_subtree() local
5741 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); in btrfs_drop_subtree()
5745 return -ENOMEM; in btrfs_drop_subtree()
5747 wc = kzalloc(sizeof(*wc), GFP_NOFS); in btrfs_drop_subtree()
5748 if (!wc) { in btrfs_drop_subtree()
5750 return -ENOMEM; in btrfs_drop_subtree()
5755 atomic_inc(&parent->refs); in btrfs_drop_subtree()
5756 path->nodes[parent_level] = parent; in btrfs_drop_subtree()
5757 path->slots[parent_level] = btrfs_header_nritems(parent); in btrfs_drop_subtree()
5761 path->nodes[level] = node; in btrfs_drop_subtree()
5762 path->slots[level] = 0; in btrfs_drop_subtree()
5763 path->locks[level] = BTRFS_WRITE_LOCK; in btrfs_drop_subtree()
5765 wc->refs[parent_level] = 1; in btrfs_drop_subtree()
5766 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; in btrfs_drop_subtree()
5767 wc->level = level; in btrfs_drop_subtree()
5768 wc->shared_level = -1; in btrfs_drop_subtree()
5769 wc->stage = DROP_REFERENCE; in btrfs_drop_subtree()
5770 wc->update_ref = 0; in btrfs_drop_subtree()
5771 wc->keep_locks = 1; in btrfs_drop_subtree()
5772 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); in btrfs_drop_subtree()
5775 wret = walk_down_tree(trans, root, path, wc); in btrfs_drop_subtree()
5781 wret = walk_up_tree(trans, root, path, wc, parent_level); in btrfs_drop_subtree()
5788 kfree(wc); in btrfs_drop_subtree()
5804 if (list_empty(&sinfo->ro_bgs)) in btrfs_account_ro_block_groups_free_space()
5807 spin_lock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
5808 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { in btrfs_account_ro_block_groups_free_space()
5809 spin_lock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
5811 if (!block_group->ro) { in btrfs_account_ro_block_groups_free_space()
5812 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
5816 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_account_ro_block_groups_free_space()
5817 free_bytes += (block_group->length - in btrfs_account_ro_block_groups_free_space()
5818 block_group->used) * factor; in btrfs_account_ro_block_groups_free_space()
5820 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
5822 spin_unlock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
5847 * from committing and releasing the memory that the pending chunks
5861 if (!blk_queue_discard(bdev_get_queue(device->bdev))) in btrfs_trim_free_extents()
5865 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) in btrfs_trim_free_extents()
5868 /* No free space = nothing to do. */ in btrfs_trim_free_extents()
5869 if (device->total_bytes <= device->bytes_used) in btrfs_trim_free_extents()
5875 struct btrfs_fs_info *fs_info = device->fs_info; in btrfs_trim_free_extents()
5878 ret = mutex_lock_interruptible(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
5882 find_first_clear_extent_bit(&device->alloc_state, start, in btrfs_trim_free_extents()
5887 if (start > device->total_bytes) { in btrfs_trim_free_extents()
5891 start, end - start + 1, in btrfs_trim_free_extents()
5892 rcu_str_deref(device->name), in btrfs_trim_free_extents()
5893 device->total_bytes); in btrfs_trim_free_extents()
5894 mutex_unlock(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
5904 * end of the device it will set end to -1, in this case it's up in btrfs_trim_free_extents()
5907 end = min(end, device->total_bytes - 1); in btrfs_trim_free_extents()
5909 len = end - start + 1; in btrfs_trim_free_extents()
5913 mutex_unlock(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
5918 ret = btrfs_issue_discard(device->bdev, start, len, in btrfs_trim_free_extents()
5921 set_extent_bits(&device->alloc_state, start, in btrfs_trim_free_extents()
5922 start + bytes - 1, in btrfs_trim_free_extents()
5924 mutex_unlock(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
5933 ret = -ERESTARTSYS; in btrfs_trim_free_extents()
5954 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; in btrfs_trim_fs()
5969 * Check range overflow if range->len is set. in btrfs_trim_fs()
5970 * The default range->len is U64_MAX. in btrfs_trim_fs()
5972 if (range->len != U64_MAX && in btrfs_trim_fs()
5973 check_add_overflow(range->start, range->len, &range_end)) in btrfs_trim_fs()
5974 return -EINVAL; in btrfs_trim_fs()
5976 cache = btrfs_lookup_first_block_group(fs_info, range->start); in btrfs_trim_fs()
5978 if (cache->start >= range_end) { in btrfs_trim_fs()
5983 start = max(range->start, cache->start); in btrfs_trim_fs()
5984 end = min(range_end, cache->start + cache->length); in btrfs_trim_fs()
5986 if (end - start >= range->minlen) { in btrfs_trim_fs()
6005 range->minlen); in btrfs_trim_fs()
6021 mutex_lock(&fs_devices->device_list_mutex); in btrfs_trim_fs()
6022 list_for_each_entry(device, &fs_devices->devices, dev_list) { in btrfs_trim_fs()
6023 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) in btrfs_trim_fs()
6035 mutex_unlock(&fs_devices->device_list_mutex); in btrfs_trim_fs()
6041 range->len = trimmed; in btrfs_trim_fs()