Lines Matching "+full:cache +full:-block" in fs/btrfs/block-group.c
1 // SPDX-License-Identifier: GPL-2.0
7 #include "block-group.h"
8 #include "space-info.h"
9 #include "disk-io.h"
10 #include "free-space-cache.h"
11 #include "free-space-tree.h"
14 #include "ref-verify.h"
16 #include "tree-log.h"
17 #include "delalloc-space.h"
23 #include "extent-tree.h"
28 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_should_fragment_free_space()
31 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_should_fragment_free_space()
33 block_group->flags & BTRFS_BLOCK_GROUP_DATA); in btrfs_should_fragment_free_space()
45 struct btrfs_balance_control *bctl = fs_info->balance_ctl; in get_restripe_target()
52 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
53 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; in get_restripe_target()
55 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
56 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; in get_restripe_target()
58 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
59 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; in get_restripe_target()
74 u64 num_devices = fs_info->fs_devices->rw_devices; in btrfs_reduce_alloc_profile()
83 spin_lock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
86 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
89 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
98 /* Select the highest-redundancy RAID level. */ in btrfs_reduce_alloc_profile()
128 seq = read_seqbegin(&fs_info->profiles_lock); in btrfs_get_alloc_profile()
131 flags |= fs_info->avail_data_alloc_bits; in btrfs_get_alloc_profile()
133 flags |= fs_info->avail_system_alloc_bits; in btrfs_get_alloc_profile()
135 flags |= fs_info->avail_metadata_alloc_bits; in btrfs_get_alloc_profile()
136 } while (read_seqretry(&fs_info->profiles_lock, seq)); in btrfs_get_alloc_profile()
141 void btrfs_get_block_group(struct btrfs_block_group *cache) in btrfs_get_block_group() argument
143 refcount_inc(&cache->refs); in btrfs_get_block_group()
146 void btrfs_put_block_group(struct btrfs_block_group *cache) in btrfs_put_block_group() argument
148 if (refcount_dec_and_test(&cache->refs)) { in btrfs_put_block_group()
149 WARN_ON(cache->pinned > 0); in btrfs_put_block_group()
157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_put_block_group()
158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) in btrfs_put_block_group()
159 WARN_ON(cache->reserved > 0); in btrfs_put_block_group()
166 if (WARN_ON(!list_empty(&cache->discard_list))) in btrfs_put_block_group()
167 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, in btrfs_put_block_group()
168 cache); in btrfs_put_block_group()
170 kfree(cache->free_space_ctl); in btrfs_put_block_group()
171 kfree(cache->physical_map); in btrfs_put_block_group()
172 kfree(cache); in btrfs_put_block_group()
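Taken together, lookup/get/put follow the usual refcount discipline; a minimal usage sketch (function name hypothetical, assuming btrfs_lookup_block_group() as declared in block-group.h, which returns the group with a reference already held):

static void example_inspect_bg(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	/* Lookup returns with a reference held (taken in
	 * block_group_cache_tree_search() below). */
	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return;
	/* ... read bg->start, bg->length, bg->used, etc. ... */
	btrfs_put_block_group(bg);	/* drop the lookup reference */
}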
177 * This adds the block group to the fs_info rb tree for the block group cache
184 struct btrfs_block_group *cache; in btrfs_add_block_group_cache() local
187 ASSERT(block_group->length != 0); in btrfs_add_block_group_cache()
189 write_lock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
190 p = &info->block_group_cache_tree.rb_root.rb_node; in btrfs_add_block_group_cache()
194 cache = rb_entry(parent, struct btrfs_block_group, cache_node); in btrfs_add_block_group_cache()
195 if (block_group->start < cache->start) { in btrfs_add_block_group_cache()
196 p = &(*p)->rb_left; in btrfs_add_block_group_cache()
197 } else if (block_group->start > cache->start) { in btrfs_add_block_group_cache()
198 p = &(*p)->rb_right; in btrfs_add_block_group_cache()
201 write_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
202 return -EEXIST; in btrfs_add_block_group_cache()
206 rb_link_node(&block_group->cache_node, parent, p); in btrfs_add_block_group_cache()
207 rb_insert_color_cached(&block_group->cache_node, in btrfs_add_block_group_cache()
208 &info->block_group_cache_tree, leftmost); in btrfs_add_block_group_cache()
210 write_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
216 * This will return the block group at or after bytenr if contains is 0, else
217 * it will return the block group that contains the bytenr
222 struct btrfs_block_group *cache, *ret = NULL; in block_group_cache_tree_search() local
226 read_lock(&info->block_group_cache_lock); in block_group_cache_tree_search()
227 n = info->block_group_cache_tree.rb_root.rb_node; in block_group_cache_tree_search()
230 cache = rb_entry(n, struct btrfs_block_group, cache_node); in block_group_cache_tree_search()
231 end = cache->start + cache->length - 1; in block_group_cache_tree_search()
232 start = cache->start; in block_group_cache_tree_search()
235 if (!contains && (!ret || start < ret->start)) in block_group_cache_tree_search()
236 ret = cache; in block_group_cache_tree_search()
237 n = n->rb_left; in block_group_cache_tree_search()
240 ret = cache; in block_group_cache_tree_search()
243 n = n->rb_right; in block_group_cache_tree_search()
245 ret = cache; in block_group_cache_tree_search()
251 read_unlock(&info->block_group_cache_lock); in block_group_cache_tree_search()
257 * Return the block group that starts at or after bytenr
266 * Return the block group that contains the given bytenr
275 struct btrfs_block_group *cache) in btrfs_next_block_group() argument
277 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_next_block_group()
280 read_lock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
282 /* If our block group was removed, we need a full search. */ in btrfs_next_block_group()
283 if (RB_EMPTY_NODE(&cache->cache_node)) { in btrfs_next_block_group()
284 const u64 next_bytenr = cache->start + cache->length; in btrfs_next_block_group()
286 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
287 btrfs_put_block_group(cache); in btrfs_next_block_group()
290 node = rb_next(&cache->cache_node); in btrfs_next_block_group()
291 btrfs_put_block_group(cache); in btrfs_next_block_group()
293 cache = rb_entry(node, struct btrfs_block_group, cache_node); in btrfs_next_block_group()
294 btrfs_get_block_group(cache); in btrfs_next_block_group()
296 cache = NULL; in btrfs_next_block_group()
297 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
298 return cache; in btrfs_next_block_group()
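btrfs_next_block_group() consumes the reference on the group it is handed and returns the next group already referenced, which gives the canonical iteration idiom; a sketch (wrapper hypothetical, using btrfs_lookup_first_block_group() from the "at or after bytenr" search above):

static void example_walk_block_groups(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *bg;

	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
	     bg = btrfs_next_block_group(bg)) {
		/* per-group work; the loop header manages all references */
	}
}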
308 * number of NOCOW writers in the block group that contains the extent, as long
309 * as the block group exists and it's currently not in read-only mode.
311 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
326 spin_lock(&bg->lock); in btrfs_inc_nocow_writers()
327 if (bg->ro) in btrfs_inc_nocow_writers()
330 atomic_inc(&bg->nocow_writers); in btrfs_inc_nocow_writers()
331 spin_unlock(&bg->lock); in btrfs_inc_nocow_writers()
338 /* No put on block group, done by btrfs_dec_nocow_writers(). */ in btrfs_inc_nocow_writers()
343 * Decrement the number of NOCOW writers in a block group.
346 * and on the block group returned by that call. Typically this is called after
350 * After this call, the caller should not use the block group anymore. If it wants in btrfs_dec_nocow_writers()
355 if (atomic_dec_and_test(&bg->nocow_writers)) in btrfs_dec_nocow_writers()
356 wake_up_var(&bg->nocow_writers); in btrfs_dec_nocow_writers()
364 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); in btrfs_wait_nocow_writers()
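A condensed sketch of how a NOCOW write path brackets its I/O with these helpers (function and I/O step hypothetical; the inc/dec pairing is what the comments above describe):

static int example_nocow_write(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
	if (!bg)
		return -EAGAIN;	/* group gone or read-only: fall back to COW */

	/* ... submit the NOCOW write against the existing extent ... */

	btrfs_dec_nocow_writers(bg);	/* wakes waiters, drops the reference */
	return 0;
}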
374 if (atomic_dec_and_test(&bg->reservations)) in btrfs_dec_block_group_reservations()
375 wake_up_var(&bg->reservations); in btrfs_dec_block_group_reservations()
381 struct btrfs_space_info *space_info = bg->space_info; in btrfs_wait_block_group_reservations()
383 ASSERT(bg->ro); in btrfs_wait_block_group_reservations()
385 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) in btrfs_wait_block_group_reservations()
389 * Our block group is read only but before we set it to read only, in btrfs_wait_block_group_reservations()
394 * block group's reservations counter is incremented while a read lock in btrfs_wait_block_group_reservations()
398 down_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
399 up_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
401 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); in btrfs_wait_block_group_reservations()
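The counter waited on here is raised when a data extent is reserved; a sketch of the pairing (assuming the btrfs_inc_block_group_reservations() inline from block-group.h, and that the decrement helper takes the fs_info and extent start, as its body above suggests; surrounding steps hypothetical):

static void example_reserved_write(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_group *bg,
				   u64 extent_start)
{
	/* at reservation time, on the group the extent landed in */
	btrfs_inc_block_group_reservations(bg);

	/* ... submit the write into the reserved range ... */

	/* the final decrement wakes btrfs_wait_block_group_reservations() */
	btrfs_dec_block_group_reservations(fs_info, extent_start);
}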
405 struct btrfs_block_group *cache) in btrfs_get_caching_control() argument
409 spin_lock(&cache->lock); in btrfs_get_caching_control()
410 if (!cache->caching_ctl) { in btrfs_get_caching_control()
411 spin_unlock(&cache->lock); in btrfs_get_caching_control()
415 ctl = cache->caching_ctl; in btrfs_get_caching_control()
416 refcount_inc(&ctl->count); in btrfs_get_caching_control()
417 spin_unlock(&cache->lock); in btrfs_get_caching_control()
423 if (refcount_dec_and_test(&ctl->count)) in btrfs_put_caching_control()
428 * When we wait for progress in the block group caching, it's because our
433 * up, and then it will check the block group free space numbers for our min
437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
438 * any of the information in this block group.
440 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, in btrfs_wait_block_group_cache_progress() argument
446 caching_ctl = btrfs_get_caching_control(cache); in btrfs_wait_block_group_cache_progress()
451 * We've already failed to allocate from this block group, so even if in btrfs_wait_block_group_cache_progress()
452 * there's enough space in the block group it isn't contiguous enough to in btrfs_wait_block_group_cache_progress()
456 progress = atomic_read(&caching_ctl->progress); in btrfs_wait_block_group_cache_progress()
458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || in btrfs_wait_block_group_cache_progress()
459 (progress != atomic_read(&caching_ctl->progress) && in btrfs_wait_block_group_cache_progress()
460 (cache->free_space_ctl->free_space >= num_bytes))); in btrfs_wait_block_group_cache_progress()
465 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache, in btrfs_caching_ctl_wait_done() argument
468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); in btrfs_caching_ctl_wait_done()
469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0; in btrfs_caching_ctl_wait_done()
472 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) in btrfs_wait_block_group_cache_done() argument
477 caching_ctl = btrfs_get_caching_control(cache); in btrfs_wait_block_group_cache_done()
479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; in btrfs_wait_block_group_cache_done()
480 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); in btrfs_wait_block_group_cache_done()
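Callers combine these with btrfs_cache_block_group() (defined further down) to either kick off caching asynchronously or block until it finishes; a minimal sketch (wrapper hypothetical):

static int example_ensure_cached(struct btrfs_block_group *bg)
{
	int ret;

	/* wait=true starts caching if needed and blocks until it is done;
	 * a caching thread that ended in BTRFS_CACHE_ERROR yields -EIO. */
	ret = btrfs_cache_block_group(bg, true);
	if (ret)
		return ret;

	/* free space information for this group is now trustworthy */
	return 0;
}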
488 struct btrfs_fs_info *fs_info = block_group->fs_info; in fragment_free_space()
489 u64 start = block_group->start; in fragment_free_space()
490 u64 len = block_group->length; in fragment_free_space()
491 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? in fragment_free_space()
492 fs_info->nodesize : fs_info->sectorsize; in fragment_free_space()
501 len -= step; in fragment_free_space()
507 * Add a free space range to the in-memory free space cache of a block group.
508 * If the range contains super block locations, those locations are
509 * excluded and not added to the free space cache.
511 * @block_group: The target block group.
515 * added to the block group's free space cache.
522 struct btrfs_fs_info *info = block_group->fs_info; in btrfs_add_new_free_space()
530 if (!find_first_extent_bit(&info->excluded_extents, start, in btrfs_add_new_free_space()
539 size = extent_start - start; in btrfs_add_new_free_space()
553 size = end - start; in btrfs_add_new_free_space()
566 * Sample an extent item at fraction index / max_index of the way through the block group
568 * @block_group the block group to sample from
569 * @index: the integral step through the block group to grab from
573 * Pre-conditions on indices:
585 struct btrfs_fs_info *fs_info = block_group->fs_info; in sample_block_group_extent_item()
588 u64 search_end = block_group->start + block_group->length; in sample_block_group_extent_item()
596 lockdep_assert_held(&caching_ctl->mutex); in sample_block_group_extent_item()
597 lockdep_assert_held_read(&fs_info->commit_root_sem); in sample_block_group_extent_item()
601 return -ENOMEM; in sample_block_group_extent_item()
603 extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start, in sample_block_group_extent_item()
606 path->skip_locking = 1; in sample_block_group_extent_item()
607 path->search_commit_root = 1; in sample_block_group_extent_item()
608 path->reada = READA_FORWARD; in sample_block_group_extent_item()
610 search_offset = index * div_u64(block_group->length, max_index); in sample_block_group_extent_item()
611 search_key.objectid = block_group->start + search_offset; in sample_block_group_extent_item()
616 /* Success; sampled an extent item in the block group */ in sample_block_group_extent_item()
617 if (found_key->type == BTRFS_EXTENT_ITEM_KEY && in sample_block_group_extent_item()
618 found_key->objectid >= block_group->start && in sample_block_group_extent_item()
619 found_key->objectid + found_key->offset <= search_end) in sample_block_group_extent_item()
623 if (found_key->objectid >= search_end) { in sample_block_group_extent_item()
629 lockdep_assert_held(&caching_ctl->mutex); in sample_block_group_extent_item()
630 lockdep_assert_held_read(&fs_info->commit_root_sem); in sample_block_group_extent_item()
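To make the probe placement concrete, here is the offset arithmetic worked through for an illustrative 1 GiB block group with the max_index of 5 used by the size-class loader below:

/* Worked example of the search_offset computation above:
 *   length = 1073741824 (1 GiB), max_index = 5
 *   div_u64(length, max_index) = 214748364
 *   index 0..4 probe at offsets 0, 214748364, 429496728,
 *   644245092 and 858993456 into the block group, i.e. at even
 *   fifths of its logical range. */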
636 * Best effort attempt to compute a block group's size class while caching it.
638 * @block_group: the block group we are caching
645 * them at even steps through the block group and pick the smallest size class
651 * If we are caching in a block group from disk, then there are three major cases
653 * 1. the block group is well behaved and all extents in it are the same size
655 * 2. the block group is mostly one size class with rare exceptions for last
657 * 3. the block group was populated before size classes and can have a totally
660 * In case 1, looking at any extent in the block group will yield the correct
672 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_block_group_size_class()
675 u64 min_size = block_group->length; in load_block_group_size_class()
682 lockdep_assert_held(&caching_ctl->mutex); in load_block_group_size_class()
683 lockdep_assert_held_read(&fs_info->commit_root_sem); in load_block_group_size_class()
694 spin_lock(&block_group->lock); in load_block_group_size_class()
695 block_group->size_class = size_class; in load_block_group_size_class()
696 spin_unlock(&block_group->lock); in load_block_group_size_class()
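The elided middle of load_block_group_size_class() is a min-reduction over those samples; a condensed sketch of how the loop plausibly reads (btrfs_calc_block_group_size_class() maps a byte count to the size-class enum):

	for (i = 0; i < 5; i++) {
		ret = sample_block_group_extent_item(caching_ctl, block_group,
						     i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)	/* no extent item at this step */
			continue;
		/* key.offset of an EXTENT_ITEM is its length */
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}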
704 struct btrfs_block_group *block_group = caching_ctl->block_group; in load_extent_tree_free()
705 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_extent_tree_free()
718 return -ENOMEM; in load_extent_tree_free()
720 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); in load_extent_tree_free()
726 * allocate from this block group until we've had a chance to fragment in load_extent_tree_free()
736 * root, since it's read-only in load_extent_tree_free()
738 path->skip_locking = 1; in load_extent_tree_free()
739 path->search_commit_root = 1; in load_extent_tree_free()
740 path->reada = READA_FORWARD; in load_extent_tree_free()
751 leaf = path->nodes[0]; in load_extent_tree_free()
756 last = (u64)-1; in load_extent_tree_free()
760 if (path->slots[0] < nritems) { in load_extent_tree_free()
761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in load_extent_tree_free()
768 rwsem_is_contended(&fs_info->commit_root_sem)) { in load_extent_tree_free()
770 up_read(&fs_info->commit_root_sem); in load_extent_tree_free()
771 mutex_unlock(&caching_ctl->mutex); in load_extent_tree_free()
773 mutex_lock(&caching_ctl->mutex); in load_extent_tree_free()
774 down_read(&fs_info->commit_root_sem); in load_extent_tree_free()
783 leaf = path->nodes[0]; in load_extent_tree_free()
796 if (key.objectid < block_group->start) { in load_extent_tree_free()
797 path->slots[0]++; in load_extent_tree_free()
801 if (key.objectid >= block_group->start + block_group->length) in load_extent_tree_free()
815 fs_info->nodesize; in load_extent_tree_free()
822 atomic_inc(&caching_ctl->progress); in load_extent_tree_free()
823 wake_up(&caching_ctl->wait); in load_extent_tree_free()
827 path->slots[0]++; in load_extent_tree_free()
831 block_group->start + block_group->length, in load_extent_tree_free()
840 clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, in btrfs_free_excluded_extents()
841 bg->start + bg->length - 1, EXTENT_UPTODATE); in btrfs_free_excluded_extents()
852 block_group = caching_ctl->block_group; in caching_thread()
853 fs_info = block_group->fs_info; in caching_thread()
855 mutex_lock(&caching_ctl->mutex); in caching_thread()
856 down_read(&fs_info->commit_root_sem); in caching_thread()
867 * We failed to load the space cache, set ourselves to in caching_thread()
870 spin_lock(&block_group->lock); in caching_thread()
871 block_group->cached = BTRFS_CACHE_STARTED; in caching_thread()
872 spin_unlock(&block_group->lock); in caching_thread()
873 wake_up(&caching_ctl->wait); in caching_thread()
878 * can't actually cache from the free space tree as our commit root and in caching_thread()
884 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) in caching_thread()
889 spin_lock(&block_group->lock); in caching_thread()
890 block_group->caching_ctl = NULL; in caching_thread()
891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; in caching_thread()
892 spin_unlock(&block_group->lock); in caching_thread()
898 spin_lock(&block_group->space_info->lock); in caching_thread()
899 spin_lock(&block_group->lock); in caching_thread()
900 bytes_used = block_group->length - block_group->used; in caching_thread()
901 block_group->space_info->bytes_used += bytes_used >> 1; in caching_thread()
902 spin_unlock(&block_group->lock); in caching_thread()
903 spin_unlock(&block_group->space_info->lock); in caching_thread()
908 up_read(&fs_info->commit_root_sem); in caching_thread()
910 mutex_unlock(&caching_ctl->mutex); in caching_thread()
912 wake_up(&caching_ctl->wait); in caching_thread()
918 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) in btrfs_cache_block_group() argument
920 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_cache_block_group()
924 /* Allocator for zoned filesystems does not use the cache at all */ in btrfs_cache_block_group()
930 return -ENOMEM; in btrfs_cache_block_group()
932 INIT_LIST_HEAD(&caching_ctl->list); in btrfs_cache_block_group()
933 mutex_init(&caching_ctl->mutex); in btrfs_cache_block_group()
934 init_waitqueue_head(&caching_ctl->wait); in btrfs_cache_block_group()
935 caching_ctl->block_group = cache; in btrfs_cache_block_group()
936 refcount_set(&caching_ctl->count, 2); in btrfs_cache_block_group()
937 atomic_set(&caching_ctl->progress, 0); in btrfs_cache_block_group()
938 btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); in btrfs_cache_block_group()
940 spin_lock(&cache->lock); in btrfs_cache_block_group()
941 if (cache->cached != BTRFS_CACHE_NO) { in btrfs_cache_block_group()
944 caching_ctl = cache->caching_ctl; in btrfs_cache_block_group()
946 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
947 spin_unlock(&cache->lock); in btrfs_cache_block_group()
950 WARN_ON(cache->caching_ctl); in btrfs_cache_block_group()
951 cache->caching_ctl = caching_ctl; in btrfs_cache_block_group()
952 cache->cached = BTRFS_CACHE_STARTED; in btrfs_cache_block_group()
953 spin_unlock(&cache->lock); in btrfs_cache_block_group()
955 write_lock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
956 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
957 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); in btrfs_cache_block_group()
958 write_unlock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
960 btrfs_get_block_group(cache); in btrfs_cache_block_group()
962 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); in btrfs_cache_block_group()
965 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); in btrfs_cache_block_group()
977 write_seqlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
979 fs_info->avail_data_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
981 fs_info->avail_metadata_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
983 fs_info->avail_system_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
984 write_sequnlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
990 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
993 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
1003 struct list_head *head = &fs_info->space_info; in clear_incompat_bg_bits()
1007 down_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
1008 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) in clear_incompat_bg_bits()
1010 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) in clear_incompat_bg_bits()
1012 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) in clear_incompat_bg_bits()
1014 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) in clear_incompat_bg_bits()
1016 up_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
1029 struct btrfs_fs_info *fs_info = trans->fs_info; in remove_block_group_item()
1035 key.objectid = block_group->start; in remove_block_group_item()
1037 key.offset = block_group->length; in remove_block_group_item()
1039 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in remove_block_group_item()
1041 ret = -ENOENT; in remove_block_group_item()
1052 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_remove_block_group()
1067 BUG_ON(!block_group->ro); in btrfs_remove_block_group()
1071 * Free the reserved super bytes from this block group before in btrfs_remove_block_group()
1075 btrfs_free_ref_tree_range(fs_info, block_group->start, in btrfs_remove_block_group()
1076 block_group->length); in btrfs_remove_block_group()
1078 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_remove_block_group()
1079 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_remove_block_group()
1081 /* make sure this block group isn't part of an allocation cluster */ in btrfs_remove_block_group()
1082 cluster = &fs_info->data_alloc_cluster; in btrfs_remove_block_group()
1083 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
1085 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
1088 * make sure this block group isn't part of a metadata in btrfs_remove_block_group()
1091 cluster = &fs_info->meta_alloc_cluster; in btrfs_remove_block_group()
1092 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
1094 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
1101 ret = -ENOMEM; in btrfs_remove_block_group()
1111 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
1113 * Make sure our free space cache IO is done before removing the in btrfs_remove_block_group()
1116 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1117 if (!list_empty(&block_group->io_list)) { in btrfs_remove_block_group()
1118 list_del_init(&block_group->io_list); in btrfs_remove_block_group()
1120 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); in btrfs_remove_block_group()
1122 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1125 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1128 if (!list_empty(&block_group->dirty_list)) { in btrfs_remove_block_group()
1129 list_del_init(&block_group->dirty_list); in btrfs_remove_block_group()
1133 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1134 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
1140 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1141 rb_erase_cached(&block_group->cache_node, in btrfs_remove_block_group()
1142 &fs_info->block_group_cache_tree); in btrfs_remove_block_group()
1143 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_remove_block_group()
1145 /* Once for the block groups rbtree */ in btrfs_remove_block_group()
1148 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1150 down_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1155 list_del_init(&block_group->list); in btrfs_remove_block_group()
1156 if (list_empty(&block_group->space_info->block_groups[index])) { in btrfs_remove_block_group()
1157 kobj = block_group->space_info->block_group_kobjs[index]; in btrfs_remove_block_group()
1158 block_group->space_info->block_group_kobjs[index] = NULL; in btrfs_remove_block_group()
1159 clear_avail_alloc_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1161 up_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1162 clear_incompat_bg_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1168 if (block_group->cached == BTRFS_CACHE_STARTED) in btrfs_remove_block_group()
1171 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1176 list_for_each_entry(ctl, &fs_info->caching_block_groups, list) { in btrfs_remove_block_group()
1177 if (ctl->block_group == block_group) { in btrfs_remove_block_group()
1179 refcount_inc(&caching_ctl->count); in btrfs_remove_block_group()
1185 list_del_init(&caching_ctl->list); in btrfs_remove_block_group()
1186 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1194 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1195 WARN_ON(!list_empty(&block_group->dirty_list)); in btrfs_remove_block_group()
1196 WARN_ON(!list_empty(&block_group->io_list)); in btrfs_remove_block_group()
1197 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1201 spin_lock(&block_group->space_info->lock); in btrfs_remove_block_group()
1202 list_del_init(&block_group->ro_list); in btrfs_remove_block_group()
1205 WARN_ON(block_group->space_info->total_bytes in btrfs_remove_block_group()
1206 < block_group->length); in btrfs_remove_block_group()
1207 WARN_ON(block_group->space_info->bytes_readonly in btrfs_remove_block_group()
1208 < block_group->length - block_group->zone_unusable); in btrfs_remove_block_group()
1209 WARN_ON(block_group->space_info->bytes_zone_unusable in btrfs_remove_block_group()
1210 < block_group->zone_unusable); in btrfs_remove_block_group()
1211 WARN_ON(block_group->space_info->disk_total in btrfs_remove_block_group()
1212 < block_group->length * factor); in btrfs_remove_block_group()
1214 block_group->space_info->total_bytes -= block_group->length; in btrfs_remove_block_group()
1215 block_group->space_info->bytes_readonly -= in btrfs_remove_block_group()
1216 (block_group->length - block_group->zone_unusable); in btrfs_remove_block_group()
1217 block_group->space_info->bytes_zone_unusable -= in btrfs_remove_block_group()
1218 block_group->zone_unusable; in btrfs_remove_block_group()
1219 block_group->space_info->disk_total -= block_group->length * factor; in btrfs_remove_block_group()
1221 spin_unlock(&block_group->space_info->lock); in btrfs_remove_block_group()
1224 * Remove the free space for the block group from the free space tree in btrfs_remove_block_group()
1225 * and the block group's item from the extent tree before marking the in btrfs_remove_block_group()
1226 * block group as removed. This is to prevent races with tasks that in btrfs_remove_block_group()
1227 * freeze and unfreeze a block group, this task and another task in btrfs_remove_block_group()
1228 * allocating a new block group - the unfreeze task ends up removing in btrfs_remove_block_group()
1229 * the block group's extent map before the task calling this function in btrfs_remove_block_group()
1230 * deletes the block group item from the extent tree, allowing for in btrfs_remove_block_group()
1231 * another task to attempt to create another block group with the same in btrfs_remove_block_group()
1232 * item key (and failing with -EEXIST and a transaction abort). in btrfs_remove_block_group()
1242 spin_lock(&block_group->lock); in btrfs_remove_block_group()
1243 set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags); in btrfs_remove_block_group()
1246 * At this point trimming or scrub can't start on this block group, in btrfs_remove_block_group()
1247 * because we removed the block group from the rbtree in btrfs_remove_block_group()
1248 * fs_info->block_group_cache_tree so no one can find it anymore and in btrfs_remove_block_group()
1249 * even if someone already got this block group before we removed it in btrfs_remove_block_group()
1250 * from the rbtree, they have already incremented block_group->frozen - in btrfs_remove_block_group()
1255 * And we must not remove the extent map from the fs_info->mapping_tree in btrfs_remove_block_group()
1257 * ranges from being reused for a new block group. This is needed to in btrfs_remove_block_group()
1263 * allowing for new block groups to be created that can reuse the same in btrfs_remove_block_group()
1267 * is mounted with -odiscard. The same protections must remain in btrfs_remove_block_group()
1271 remove_em = (atomic_read(&block_group->frozen) == 0); in btrfs_remove_block_group()
1272 spin_unlock(&block_group->lock); in btrfs_remove_block_group()
1277 em_tree = &fs_info->mapping_tree; in btrfs_remove_block_group()
1278 write_lock(&em_tree->lock); in btrfs_remove_block_group()
1280 write_unlock(&em_tree->lock); in btrfs_remove_block_group()
1298 struct extent_map_tree *em_tree = &fs_info->mapping_tree; in btrfs_start_trans_remove_block_group()
1303 read_lock(&em_tree->lock); in btrfs_start_trans_remove_block_group()
1305 read_unlock(&em_tree->lock); in btrfs_start_trans_remove_block_group()
1306 ASSERT(em && em->start == chunk_offset); in btrfs_start_trans_remove_block_group()
1310 * to remove a block group (done at btrfs_remove_chunk() and at in btrfs_start_trans_remove_block_group()
1315 * 1 unit for deleting the block group item (located in the extent in btrfs_start_trans_remove_block_group()
1322 * In order to remove a block group we also need to reserve units in the in btrfs_start_trans_remove_block_group()
1327 map = em->map_lookup; in btrfs_start_trans_remove_block_group()
1328 num_items = 3 + map->num_stripes; in btrfs_start_trans_remove_block_group()
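So the reservation grows with the chunk's stripe count; a worked instance of the arithmetic (numbers illustrative):

/* Worked example: removing a block group whose chunk map has
 * num_stripes = 2 (e.g. RAID1) reserves num_items = 3 + 2 = 5
 * metadata units: three fixed items noted in the comment above
 * plus one device extent deletion per stripe. */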
1335 * Mark block group @cache read-only, so later write won't happen to block
1336 * group @cache.
1338 * If @force is not set, this function will only mark the block group readonly
1339 * if we have enough free space (1M) in other metadata/system block groups.
1340 * If @force is set, this function will mark the block group readonly
1343 * NOTE: This function doesn't care if other block groups can contain all the
1344 * data in this block group. That check should be done by relocation routine,
1347 static int inc_block_group_ro(struct btrfs_block_group *cache, int force) in inc_block_group_ro() argument
1349 struct btrfs_space_info *sinfo = cache->space_info; in inc_block_group_ro()
1351 int ret = -ENOSPC; in inc_block_group_ro()
1353 spin_lock(&sinfo->lock); in inc_block_group_ro()
1354 spin_lock(&cache->lock); in inc_block_group_ro()
1356 if (cache->swap_extents) { in inc_block_group_ro()
1357 ret = -ETXTBSY; in inc_block_group_ro()
1361 if (cache->ro) { in inc_block_group_ro()
1362 cache->ro++; in inc_block_group_ro()
1367 num_bytes = cache->length - cache->reserved - cache->pinned - in inc_block_group_ro()
1368 cache->bytes_super - cache->zone_unusable - cache->used; in inc_block_group_ro()
1376 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { in inc_block_group_ro()
1383 if (sinfo_used + num_bytes <= sinfo->total_bytes) in inc_block_group_ro()
1390 * leeway to allow us to mark this block group as read only. in inc_block_group_ro()
1392 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, in inc_block_group_ro()
1398 sinfo->bytes_readonly += num_bytes; in inc_block_group_ro()
1399 if (btrfs_is_zoned(cache->fs_info)) { in inc_block_group_ro()
1401 sinfo->bytes_readonly += cache->zone_unusable; in inc_block_group_ro()
1402 sinfo->bytes_zone_unusable -= cache->zone_unusable; in inc_block_group_ro()
1403 cache->zone_unusable = 0; in inc_block_group_ro()
1405 cache->ro++; in inc_block_group_ro()
1406 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); in inc_block_group_ro()
1409 spin_unlock(&cache->lock); in inc_block_group_ro()
1410 spin_unlock(&sinfo->lock); in inc_block_group_ro()
1411 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { in inc_block_group_ro()
1412 btrfs_info(cache->fs_info, in inc_block_group_ro()
1413 "unable to make block group %llu ro", cache->start); in inc_block_group_ro()
1414 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); in inc_block_group_ro()
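The -ENOSPC check above boils down to: the free space this group still holds must remain coverable by the rest of the space info once the group stops accepting writes. A worked example with illustrative numbers:

/* Worked example of the num_bytes computation (values illustrative):
 *   length = 1 GiB, reserved = 64 MiB, pinned = 0,
 *   bytes_super = 16 MiB, zone_unusable = 0, used = 640 MiB
 *   num_bytes = 1024 - 64 - 0 - 16 - 0 - 640 = 304 MiB
 * Turning the group read-only removes those 304 MiB from circulation,
 * so it is allowed only if sinfo_used + 304 MiB still fits within
 * sinfo->total_bytes (or within the metadata overcommit allowance). */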
1422 struct btrfs_fs_info *fs_info = bg->fs_info; in clean_pinned_extents()
1424 const u64 start = bg->start; in clean_pinned_extents()
1425 const u64 end = start + bg->length - 1; in clean_pinned_extents()
1428 spin_lock(&fs_info->trans_lock); in clean_pinned_extents()
1429 if (trans->transaction->list.prev != &fs_info->trans_list) { in clean_pinned_extents()
1430 prev_trans = list_last_entry(&trans->transaction->list, in clean_pinned_extents()
1432 refcount_inc(&prev_trans->use_count); in clean_pinned_extents()
1434 spin_unlock(&fs_info->trans_lock); in clean_pinned_extents()
1440 * transaction N - 1, and have seen a range belonging to the block in clean_pinned_extents()
1441 * group in pinned_extents before we were able to clear the whole block in clean_pinned_extents()
1443 * the block group after we unpinned it from pinned_extents and removed in clean_pinned_extents()
1446 mutex_lock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1448 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, in clean_pinned_extents()
1454 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, in clean_pinned_extents()
1457 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1476 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_delete_unused_bgs()
1486 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_delete_unused_bgs()
1489 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1490 while (!list_empty(&fs_info->unused_bgs)) { in btrfs_delete_unused_bgs()
1493 block_group = list_first_entry(&fs_info->unused_bgs, in btrfs_delete_unused_bgs()
1496 list_del_init(&block_group->bg_list); in btrfs_delete_unused_bgs()
1498 space_info = block_group->space_info; in btrfs_delete_unused_bgs()
1504 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1506 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); in btrfs_delete_unused_bgs()
1509 down_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1512 * Async discard moves the final block group discard to be prior in btrfs_delete_unused_bgs()
1519 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1521 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1526 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1527 if (block_group->reserved || block_group->pinned || in btrfs_delete_unused_bgs()
1528 block_group->used || block_group->ro || in btrfs_delete_unused_bgs()
1529 list_is_singular(&block_group->list)) { in btrfs_delete_unused_bgs()
1532 * outstanding allocations in this block group. We do in btrfs_delete_unused_bgs()
1534 * this block group. in btrfs_delete_unused_bgs()
1537 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1538 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1541 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1545 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1554 if (ret == -EAGAIN) in btrfs_delete_unused_bgs()
1564 block_group->start); in btrfs_delete_unused_bgs()
1572 * We could have pending pinned extents for this block group, in btrfs_delete_unused_bgs()
1587 spin_lock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1588 if (!list_empty(&block_group->discard_list)) { in btrfs_delete_unused_bgs()
1589 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1591 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1595 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1598 spin_lock(&space_info->lock); in btrfs_delete_unused_bgs()
1599 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1602 -block_group->pinned); in btrfs_delete_unused_bgs()
1603 space_info->bytes_readonly += block_group->pinned; in btrfs_delete_unused_bgs()
1604 block_group->pinned = 0; in btrfs_delete_unused_bgs()
1606 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1607 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1610 * The normal path here is that an unused block group is passed here, in btrfs_delete_unused_bgs()
1613 * before coming down the unused block group path as trimming in btrfs_delete_unused_bgs()
1621 * need to reset sequential-required zones. in btrfs_delete_unused_bgs()
1634 ret = btrfs_remove_chunk(trans, block_group->start); in btrfs_delete_unused_bgs()
1643 * If we're not mounted with -odiscard, we can just forget in btrfs_delete_unused_bgs()
1644 * about this block group. Otherwise we'll need to wait in btrfs_delete_unused_bgs()
1648 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1651 * fs_info->unused_bgs, so use a list_move operation in btrfs_delete_unused_bgs()
1652 * to add the block group to the deleted_bgs list. in btrfs_delete_unused_bgs()
1654 list_move(&block_group->bg_list, in btrfs_delete_unused_bgs()
1655 &trans->transaction->deleted_bgs); in btrfs_delete_unused_bgs()
1656 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1663 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1665 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1666 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1671 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1678 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_unused()
1680 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1681 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_unused()
1684 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1685 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { in btrfs_mark_bg_unused()
1686 /* Pull out the block group from the reclaim_bgs list. */ in btrfs_mark_bg_unused()
1688 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1690 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1694 * We want block groups with a low number of used bytes to be at the beginning
1705 return bg1->used > bg2->used; in reclaim_bgs_cmp()
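The comparator's contract comes from list_sort(): a positive return means the first element sorts after the second, so this one-liner yields ascending order by used bytes:

/* list_sort() treats cmp(a, b) > 0 as "a sorts after b", so returning
 * bg1->used > bg2->used places the emptiest block groups first; they
 * are the cheapest to relocate and are reclaimed before fuller ones. */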
1717 const struct btrfs_space_info *space_info = bg->space_info; in should_reclaim_block_group()
1718 const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); in should_reclaim_block_group()
1719 const u64 new_val = bg->used; in should_reclaim_block_group()
1726 thresh = mult_perc(bg->length, reclaim_thresh); in should_reclaim_block_group()
1730 * brand new block group and we don't want to relocate new block groups. in should_reclaim_block_group()
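A worked instance of the threshold test (threshold value illustrative; the default of 0 disables reclaim entirely):

/* Worked example: length = 1 GiB, bg_reclaim_threshold = 75
 *   thresh = mult_perc(1073741824, 75) = 805306368 (768 MiB)
 * A free that drops bg->used from 900 MiB (>= thresh) to 700 MiB
 * (< thresh) marks the group for reclaim. A group already below the
 * threshold before the free is skipped: per the comment above, it is
 * likely brand new and still filling up. */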
1746 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_reclaim_bgs_work()
1755 sb_start_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1758 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1766 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { in btrfs_reclaim_bgs_work()
1768 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1772 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1775 * The block groups might still be in use and reachable via bg_list, in btrfs_reclaim_bgs_work()
1778 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); in btrfs_reclaim_bgs_work()
1779 while (!list_empty(&fs_info->reclaim_bgs)) { in btrfs_reclaim_bgs_work()
1783 bg = list_first_entry(&fs_info->reclaim_bgs, in btrfs_reclaim_bgs_work()
1786 list_del_init(&bg->bg_list); in btrfs_reclaim_bgs_work()
1788 space_info = bg->space_info; in btrfs_reclaim_bgs_work()
1789 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1792 down_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1794 spin_lock(&bg->lock); in btrfs_reclaim_bgs_work()
1795 if (bg->reserved || bg->pinned || bg->ro) { in btrfs_reclaim_bgs_work()
1798 * outstanding allocations in this block group. We do in btrfs_reclaim_bgs_work()
1800 * this block group. in btrfs_reclaim_bgs_work()
1802 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1803 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1806 if (bg->used == 0) { in btrfs_reclaim_bgs_work()
1808 * It is possible that we trigger relocation on a block in btrfs_reclaim_bgs_work()
1814 * for the non-existent extents and running some extra in btrfs_reclaim_bgs_work()
1816 * other mechanisms for dealing with empty block groups. in btrfs_reclaim_bgs_work()
1820 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1821 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1826 * The block group might no longer meet the reclaim condition by in btrfs_reclaim_bgs_work()
1835 if (!should_reclaim_block_group(bg, bg->length)) { in btrfs_reclaim_bgs_work()
1836 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1837 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1840 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1843 * Get out fast, in case we're read-only or unmounting the in btrfs_reclaim_bgs_work()
1844 * filesystem. It is OK to drop block groups from the list even in btrfs_reclaim_bgs_work()
1845 * for the read-only case. As we did sb_start_write(), in btrfs_reclaim_bgs_work()
1846 * "mount -o remount,ro" won't happen and read-only filesystem in btrfs_reclaim_bgs_work()
1847 * means it is forced read-only due to a fatal error. So, it in btrfs_reclaim_bgs_work()
1848 * never gets back to read-write to let us reclaim again. in btrfs_reclaim_bgs_work()
1851 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1856 * Cache the zone_unusable value before turning the block group in btrfs_reclaim_bgs_work()
1858 * zone_unusable value gets moved to the block group's read-only in btrfs_reclaim_bgs_work()
1861 zone_unusable = bg->zone_unusable; in btrfs_reclaim_bgs_work()
1863 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1869 bg->start, in btrfs_reclaim_bgs_work()
1870 div64_u64(bg->used * 100, bg->length), in btrfs_reclaim_bgs_work()
1871 div64_u64(zone_unusable * 100, bg->length)); in btrfs_reclaim_bgs_work()
1873 ret = btrfs_relocate_chunk(fs_info, bg->start); in btrfs_reclaim_bgs_work()
1877 bg->start); in btrfs_reclaim_bgs_work()
1885 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1887 * Reclaiming all the block groups in the list can take a really in btrfs_reclaim_bgs_work()
1888 * long time. Prioritize cleaning up unused block groups. in btrfs_reclaim_bgs_work()
1895 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_reclaim_bgs_work()
1897 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1899 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1900 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1903 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1908 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1909 if (!list_empty(&fs_info->reclaim_bgs)) in btrfs_reclaim_bgs()
1910 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); in btrfs_reclaim_bgs()
1911 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1916 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_to_reclaim()
1918 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
1919 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_to_reclaim()
1922 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); in btrfs_mark_bg_to_reclaim()
1924 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
1938 slot = path->slots[0]; in read_bg_from_eb()
1939 leaf = path->nodes[0]; in read_bg_from_eb()
1941 em_tree = &fs_info->mapping_tree; in read_bg_from_eb()
1942 read_lock(&em_tree->lock); in read_bg_from_eb()
1943 em = lookup_extent_mapping(em_tree, key->objectid, key->offset); in read_bg_from_eb()
1944 read_unlock(&em_tree->lock); in read_bg_from_eb()
1948 key->objectid, key->offset); in read_bg_from_eb()
1949 return -ENOENT; in read_bg_from_eb()
1952 if (em->start != key->objectid || em->len != key->offset) { in read_bg_from_eb()
1954 "block group %llu len %llu mismatch with chunk %llu len %llu", in read_bg_from_eb()
1955 key->objectid, key->offset, em->start, em->len); in read_bg_from_eb()
1956 ret = -EUCLEAN; in read_bg_from_eb()
1965 if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in read_bg_from_eb()
1967 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", in read_bg_from_eb()
1968 key->objectid, key->offset, flags, in read_bg_from_eb()
1969 (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); in read_bg_from_eb()
1970 ret = -EUCLEAN; in read_bg_from_eb()
1987 if (found_key.objectid >= key->objectid && in find_first_block_group()
2000 write_seqlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
2002 fs_info->avail_data_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2004 fs_info->avail_metadata_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2006 fs_info->avail_system_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2007 write_sequnlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
2014 * @chunk_start: logical address of block group
2018 * @stripe_len: size of IO stripe for the given block group
2021 * Used primarily to exclude those portions of a block group that contain super
2022 * block copies.
2038 return -EIO; in btrfs_rmap_block()
2040 map = em->map_lookup; in btrfs_rmap_block()
2041 data_stripe_length = em->orig_block_len; in btrfs_rmap_block()
2043 chunk_start = em->start; in btrfs_rmap_block()
2046 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in btrfs_rmap_block()
2049 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); in btrfs_rmap_block()
2051 ret = -ENOMEM; in btrfs_rmap_block()
2055 for (i = 0; i < map->num_stripes; i++) { in btrfs_rmap_block()
2061 if (!in_range(physical, map->stripes[i].physical, in btrfs_rmap_block()
2065 stripe_nr = (physical - map->stripes[i].physical) >> in btrfs_rmap_block()
2067 offset = (physical - map->stripes[i].physical) & in btrfs_rmap_block()
2070 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | in btrfs_rmap_block()
2072 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, in btrfs_rmap_block()
2073 map->sub_stripes); in btrfs_rmap_block()
2077 * instead of map->stripe_len in btrfs_rmap_block()
2101 static int exclude_super_stripes(struct btrfs_block_group *cache) in exclude_super_stripes() argument
2103 struct btrfs_fs_info *fs_info = cache->fs_info; in exclude_super_stripes()
2110 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { in exclude_super_stripes()
2111 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; in exclude_super_stripes()
2112 cache->bytes_super += stripe_len; in exclude_super_stripes()
2113 ret = set_extent_bit(&fs_info->excluded_extents, cache->start, in exclude_super_stripes()
2114 cache->start + stripe_len - 1, in exclude_super_stripes()
2122 ret = btrfs_rmap_block(fs_info, cache->start, in exclude_super_stripes()
2131 "zoned: block group %llu must not contain super block", in exclude_super_stripes()
2132 cache->start); in exclude_super_stripes()
2133 return -EUCLEAN; in exclude_super_stripes()
2136 while (nr--) { in exclude_super_stripes()
2138 cache->start + cache->length - logical[nr]); in exclude_super_stripes()
2140 cache->bytes_super += len; in exclude_super_stripes()
2141 ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], in exclude_super_stripes()
2142 logical[nr] + len - 1, in exclude_super_stripes()
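Putting the two routines together: for each super block mirror inside the group, btrfs_rmap_block() reports every logical address that maps onto the mirror's physical location, and exclude_super_stripes() subtracts those ranges from the free space accounting. A worked instance of the stripe arithmetic (RAID0 layout and numbers illustrative):

/* Worked example of the btrfs_rmap_block() math for a RAID0 chunk with
 * num_stripes = 2, sub_stripes = 1, a 64 KiB stripe unit, stripe i = 1
 * starting at physical 1 GiB, and a super block copy at 1 GiB + 160 KiB:
 *   stripe_nr = 160 KiB >> 16          = 2
 *   offset    = 160 KiB & (64 KiB - 1) = 32 KiB
 *   RAID0:      stripe_nr = (2 * 2 + 1) / 1 = 5
 *   logical   = chunk_start + (5 << 16) + 32 KiB = chunk_start + 352 KiB
 * so that one mirror excludes a small range 352 KiB into the chunk. */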
2158 struct btrfs_block_group *cache; in btrfs_create_block_group_cache() local
2160 cache = kzalloc(sizeof(*cache), GFP_NOFS); in btrfs_create_block_group_cache()
2161 if (!cache) in btrfs_create_block_group_cache()
2164 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), in btrfs_create_block_group_cache()
2166 if (!cache->free_space_ctl) { in btrfs_create_block_group_cache()
2167 kfree(cache); in btrfs_create_block_group_cache()
2171 cache->start = start; in btrfs_create_block_group_cache()
2173 cache->fs_info = fs_info; in btrfs_create_block_group_cache()
2174 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); in btrfs_create_block_group_cache()
2176 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; in btrfs_create_block_group_cache()
2178 refcount_set(&cache->refs, 1); in btrfs_create_block_group_cache()
2179 spin_lock_init(&cache->lock); in btrfs_create_block_group_cache()
2180 init_rwsem(&cache->data_rwsem); in btrfs_create_block_group_cache()
2181 INIT_LIST_HEAD(&cache->list); in btrfs_create_block_group_cache()
2182 INIT_LIST_HEAD(&cache->cluster_list); in btrfs_create_block_group_cache()
2183 INIT_LIST_HEAD(&cache->bg_list); in btrfs_create_block_group_cache()
2184 INIT_LIST_HEAD(&cache->ro_list); in btrfs_create_block_group_cache()
2185 INIT_LIST_HEAD(&cache->discard_list); in btrfs_create_block_group_cache()
2186 INIT_LIST_HEAD(&cache->dirty_list); in btrfs_create_block_group_cache()
2187 INIT_LIST_HEAD(&cache->io_list); in btrfs_create_block_group_cache()
2188 INIT_LIST_HEAD(&cache->active_bg_list); in btrfs_create_block_group_cache()
2189 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); in btrfs_create_block_group_cache()
2190 atomic_set(&cache->frozen, 0); in btrfs_create_block_group_cache()
2191 mutex_init(&cache->free_space_lock); in btrfs_create_block_group_cache()
2193 return cache; in btrfs_create_block_group_cache()
2197 * Iterate all chunks and verify that each of them has the corresponding block
2202 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in check_chunk_block_group_mappings()
2209 read_lock(&map_tree->lock); in check_chunk_block_group_mappings()
2216 read_unlock(&map_tree->lock); in check_chunk_block_group_mappings()
2220 bg = btrfs_lookup_block_group(fs_info, em->start); in check_chunk_block_group_mappings()
2223 "chunk start=%llu len=%llu doesn't have corresponding block group", in check_chunk_block_group_mappings()
2224 em->start, em->len); in check_chunk_block_group_mappings()
2225 ret = -EUCLEAN; in check_chunk_block_group_mappings()
2229 if (bg->start != em->start || bg->length != em->len || in check_chunk_block_group_mappings()
2230 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != in check_chunk_block_group_mappings()
2231 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in check_chunk_block_group_mappings()
2233 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", in check_chunk_block_group_mappings()
2234 em->start, em->len, in check_chunk_block_group_mappings()
2235 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, in check_chunk_block_group_mappings()
2236 bg->start, bg->length, in check_chunk_block_group_mappings()
2237 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); in check_chunk_block_group_mappings()
2238 ret = -EUCLEAN; in check_chunk_block_group_mappings()
2243 start = em->start + em->len; in check_chunk_block_group_mappings()
2255 struct btrfs_block_group *cache; in read_one_block_group() local
2259 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); in read_one_block_group()
2261 cache = btrfs_create_block_group_cache(info, key->objectid); in read_one_block_group()
2262 if (!cache) in read_one_block_group()
2263 return -ENOMEM; in read_one_block_group()
2265 cache->length = key->offset; in read_one_block_group()
2266 cache->used = btrfs_stack_block_group_used(bgi); in read_one_block_group()
2267 cache->commit_used = cache->used; in read_one_block_group()
2268 cache->flags = btrfs_stack_block_group_flags(bgi); in read_one_block_group()
2269 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); in read_one_block_group()
2271 set_free_space_tree_thresholds(cache); in read_one_block_group()
2275 * When we mount with old space cache, we need to in read_one_block_group()
2279 * truncate the old free space cache inode and in read_one_block_group()
2282 * the new space cache info onto disk. in read_one_block_group()
2285 cache->disk_cache_state = BTRFS_DC_CLEAR; in read_one_block_group()
2287 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && in read_one_block_group()
2288 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { in read_one_block_group()
2290 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", in read_one_block_group()
2291 cache->start); in read_one_block_group()
2292 ret = -EINVAL; in read_one_block_group()
2296 ret = btrfs_load_block_group_zone_info(cache, false); in read_one_block_group()
2299 cache->start); in read_one_block_group()
2308 ret = exclude_super_stripes(cache); in read_one_block_group()
2311 btrfs_free_excluded_extents(cache); in read_one_block_group()
2317 * free space for a block group. So, we don't need any caching work. in read_one_block_group()
2328 btrfs_calc_zone_unusable(cache); in read_one_block_group()
2330 btrfs_free_excluded_extents(cache); in read_one_block_group()
2331 } else if (cache->length == cache->used) { in read_one_block_group()
2332 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
2333 btrfs_free_excluded_extents(cache); in read_one_block_group()
2334 } else if (cache->used == 0) { in read_one_block_group()
2335 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
2336 ret = btrfs_add_new_free_space(cache, cache->start, in read_one_block_group()
2337 cache->start + cache->length, NULL); in read_one_block_group()
2338 btrfs_free_excluded_extents(cache); in read_one_block_group()
2343 ret = btrfs_add_block_group_cache(info, cache); in read_one_block_group()
2345 btrfs_remove_free_space_cache(cache); in read_one_block_group()
2348 trace_btrfs_add_block_group(info, cache, 0); in read_one_block_group()
2349 btrfs_add_bg_to_space_info(info, cache); in read_one_block_group()
2351 set_avail_alloc_bits(info, cache->flags); in read_one_block_group()
2352 if (btrfs_chunk_writeable(info, cache->start)) { in read_one_block_group()
2353 if (cache->used == 0) { in read_one_block_group()
2354 ASSERT(list_empty(&cache->bg_list)); in read_one_block_group()
2356 btrfs_discard_queue_work(&info->discard_ctl, cache); in read_one_block_group()
2358 btrfs_mark_bg_unused(cache); in read_one_block_group()
2361 inc_block_group_ro(cache, 1); in read_one_block_group()
2366 btrfs_put_block_group(cache); in read_one_block_group()
2372 struct extent_map_tree *em_tree = &fs_info->mapping_tree; in fill_dummy_bgs()
2376 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { in fill_dummy_bgs()
2382 map = em->map_lookup; in fill_dummy_bgs()
2383 bg = btrfs_create_block_group_cache(fs_info, em->start); in fill_dummy_bgs()
2385 ret = -ENOMEM; in fill_dummy_bgs()
2389 /* Fill dummy cache as FULL */ in fill_dummy_bgs()
2390 bg->length = em->len; in fill_dummy_bgs()
2391 bg->flags = map->type; in fill_dummy_bgs()
2392 bg->cached = BTRFS_CACHE_FINISHED; in fill_dummy_bgs()
2393 bg->used = em->len; in fill_dummy_bgs()
2397 * We may have some valid block group cache added already, in in fill_dummy_bgs()
2400 if (ret == -EEXIST) { in fill_dummy_bgs()
2414 set_avail_alloc_bits(fs_info, bg->flags); in fill_dummy_bgs()
2426 struct btrfs_block_group *cache; in btrfs_read_block_groups() local
2434 * unsupported RO options. The fs can never be mounted read-write, so no in btrfs_read_block_groups()
2435 * need to waste time searching block group items. in btrfs_read_block_groups()
2440 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & in btrfs_read_block_groups()
2449 return -ENOMEM; in btrfs_read_block_groups()
2451 cache_gen = btrfs_super_cache_generation(info->super_copy); in btrfs_read_block_groups()
2453 btrfs_super_generation(info->super_copy) != cache_gen) in btrfs_read_block_groups()
2469 leaf = path->nodes[0]; in btrfs_read_block_groups()
2470 slot = path->slots[0]; in btrfs_read_block_groups()
2485 list_for_each_entry(space_info, &info->space_info, list) { in btrfs_read_block_groups()
2489 if (list_empty(&space_info->block_groups[i])) in btrfs_read_block_groups()
2491 cache = list_first_entry(&space_info->block_groups[i], in btrfs_read_block_groups()
2494 btrfs_sysfs_add_block_group_type(cache); in btrfs_read_block_groups()
2497 if (!(btrfs_get_alloc_profile(info, space_info->flags) & in btrfs_read_block_groups()
2504 * Avoid allocating from un-mirrored block groups if there are in btrfs_read_block_groups()
2505 * mirrored block groups. in btrfs_read_block_groups()
2507 list_for_each_entry(cache, in btrfs_read_block_groups()
2508 &space_info->block_groups[BTRFS_RAID_RAID0], in btrfs_read_block_groups()
2510 inc_block_group_ro(cache, 1); in btrfs_read_block_groups()
2511 list_for_each_entry(cache, in btrfs_read_block_groups()
2512 &space_info->block_groups[BTRFS_RAID_SINGLE], in btrfs_read_block_groups()
2514 inc_block_group_ro(cache, 1); in btrfs_read_block_groups()
2524 * Try to fill the tree using dummy block groups so that the user can in btrfs_read_block_groups()
2542 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_block_group_item()
2549 spin_lock(&block_group->lock); in insert_block_group_item()
2550 btrfs_set_stack_block_group_used(&bgi, block_group->used); in insert_block_group_item()
2552 block_group->global_root_id); in insert_block_group_item()
2553 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); in insert_block_group_item()
2554 old_commit_used = block_group->commit_used; in insert_block_group_item()
2555 block_group->commit_used = block_group->used; in insert_block_group_item()
2556 key.objectid = block_group->start; in insert_block_group_item()
2558 key.offset = block_group->length; in insert_block_group_item()
2559 spin_unlock(&block_group->lock); in insert_block_group_item()
2563 spin_lock(&block_group->lock); in insert_block_group_item()
2564 block_group->commit_used = old_commit_used; in insert_block_group_item()
2565 spin_unlock(&block_group->lock); in insert_block_group_item()
2575 struct btrfs_fs_info *fs_info = device->fs_info; in insert_dev_extent()
2576 struct btrfs_root *root = fs_info->dev_root; in insert_dev_extent()
2583 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); in insert_dev_extent()
2584 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); in insert_dev_extent()
2587 return -ENOMEM; in insert_dev_extent()
2589 key.objectid = device->devid; in insert_dev_extent()
2596 leaf = path->nodes[0]; in insert_dev_extent()
2597 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); in insert_dev_extent()
2619 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_dev_extents()
2632 map = em->map_lookup; in insert_dev_extents()
2633 stripe_size = em->orig_block_len; in insert_dev_extents()
2640 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the in insert_dev_extents()
2644 mutex_lock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2645 for (i = 0; i < map->num_stripes; i++) { in insert_dev_extents()
2646 device = map->stripes[i].dev; in insert_dev_extents()
2647 dev_offset = map->stripes[i].physical; in insert_dev_extents()
2654 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2669 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_create_pending_block_groups()
2673 while (!list_empty(&trans->new_bgs)) { in btrfs_create_pending_block_groups()
2676 block_group = list_first_entry(&trans->new_bgs, in btrfs_create_pending_block_groups()
2682 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_create_pending_block_groups()
2688 &block_group->runtime_flags)) { in btrfs_create_pending_block_groups()
2689 mutex_lock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2691 mutex_unlock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2695 ret = insert_dev_extents(trans, block_group->start, in btrfs_create_pending_block_groups()
2696 block_group->length); in btrfs_create_pending_block_groups()
2707 if (block_group->space_info->block_group_kobjs[index] == NULL) in btrfs_create_pending_block_groups()
2713 list_del_init(&block_group->bg_list); in btrfs_create_pending_block_groups()
2714 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); in btrfs_create_pending_block_groups()
2720 * For extent tree v2 we use the block_group_item->chunk_offset to point at our
2732 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) in calculate_global_root_id()
2736 div64_u64_rem(offset, fs_info->nr_global_roots, &index); in calculate_global_root_id()
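/*
 * Worked example of the remainder step above (illustrative values; the
 * divisor scaling lines are elided in this listing): with
 * nr_global_roots = 4, div64_u64_rem(offset, 4, &index) leaves
 * index = offset % 4, so successive chunk offsets are spread evenly
 * across the four global roots.
 */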
2744 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_make_block_group()
2745 struct btrfs_block_group *cache; in btrfs_make_block_group() local
2750 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); in btrfs_make_block_group()
2751 if (!cache) in btrfs_make_block_group()
2752 return ERR_PTR(-ENOMEM); in btrfs_make_block_group()
2755 * Mark it as new before adding it to the rbtree of block groups or any in btrfs_make_block_group()
2759 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); in btrfs_make_block_group()
2761 cache->length = size; in btrfs_make_block_group()
2762 set_free_space_tree_thresholds(cache); in btrfs_make_block_group()
2763 cache->flags = type; in btrfs_make_block_group()
2764 cache->cached = BTRFS_CACHE_FINISHED; in btrfs_make_block_group()
2765 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); in btrfs_make_block_group()
2768 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); in btrfs_make_block_group()
2770 ret = btrfs_load_block_group_zone_info(cache, true); in btrfs_make_block_group()
2772 btrfs_put_block_group(cache); in btrfs_make_block_group()
2776 ret = exclude_super_stripes(cache); in btrfs_make_block_group()
2779 btrfs_free_excluded_extents(cache); in btrfs_make_block_group()
2780 btrfs_put_block_group(cache); in btrfs_make_block_group()
2784 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); in btrfs_make_block_group()
2785 btrfs_free_excluded_extents(cache); in btrfs_make_block_group()
2787 btrfs_put_block_group(cache); in btrfs_make_block_group()
2793 * assigned to our block group. We want our bg to be added to the rbtree in btrfs_make_block_group()
2794 * with its ->space_info set. in btrfs_make_block_group()
2796 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); in btrfs_make_block_group()
2797 ASSERT(cache->space_info); in btrfs_make_block_group()
2799 ret = btrfs_add_block_group_cache(fs_info, cache); in btrfs_make_block_group()
2801 btrfs_remove_free_space_cache(cache); in btrfs_make_block_group()
2802 btrfs_put_block_group(cache); in btrfs_make_block_group()
2807 * Now that our block group has its ->space_info set and is inserted in in btrfs_make_block_group()
2810 trace_btrfs_add_block_group(fs_info, cache, 1); in btrfs_make_block_group()
2811 btrfs_add_bg_to_space_info(fs_info, cache); in btrfs_make_block_group()
2815 if (btrfs_should_fragment_free_space(cache)) { in btrfs_make_block_group()
2816 cache->space_info->bytes_used += size >> 1; in btrfs_make_block_group()
2817 fragment_free_space(cache); in btrfs_make_block_group()
2821 list_add_tail(&cache->bg_list, &trans->new_bgs); in btrfs_make_block_group()
2822 trans->delayed_ref_updates++; in btrfs_make_block_group()
2826 return cache; in btrfs_make_block_group()
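/*
 * Hypothetical caller pattern, for illustration only: on failure the
 * function returns an ERR_PTR() encoded error (e.g. the -ENOMEM above),
 * never NULL, so callers are expected to check with IS_ERR():
 *
 *	bg = btrfs_make_block_group(trans, type, chunk_offset, size);
 *	if (IS_ERR(bg))
 *		return PTR_ERR(bg);
 */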
2830 * Mark one block group RO. This can be called several times for the same block in btrfs_inc_block_group_ro()
2833 * @cache: the destination block group
2834 * @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to in btrfs_inc_block_group_ro()
2836 * block group RO.
2838 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, in btrfs_inc_block_group_ro() argument
2841 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_inc_block_group_ro()
2849 * This can only happen when we are doing read-only scrub on read-only in btrfs_inc_block_group_ro()
2851 * In that case we should not start a new transaction on read-only fs. in btrfs_inc_block_group_ro()
2854 if (sb_rdonly(fs_info->sb)) { in btrfs_inc_block_group_ro()
2855 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2856 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2857 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2869 * We're not allowed to set block groups readonly after the dirty in btrfs_inc_block_group_ro()
2870 * block group cache has started writing. If it already started, in btrfs_inc_block_group_ro()
2873 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2874 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { in btrfs_inc_block_group_ro()
2875 u64 transid = trans->transid; in btrfs_inc_block_group_ro()
2877 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2890 * corresponding block group with the new raid level. in btrfs_inc_block_group_ro()
2892 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2893 if (alloc_flags != cache->flags) { in btrfs_inc_block_group_ro()
2900 if (ret == -ENOSPC) in btrfs_inc_block_group_ro()
2907 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2910 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
2916 * we still want to try our best to mark the block group read-only. in btrfs_inc_block_group_ro()
2918 if (!do_chunk_alloc && ret == -ENOSPC && in btrfs_inc_block_group_ro()
2919 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) in btrfs_inc_block_group_ro()
2922 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); in btrfs_inc_block_group_ro()
2930 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); in btrfs_inc_block_group_ro()
2934 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2935 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
2938 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { in btrfs_inc_block_group_ro()
2939 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2940 mutex_lock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2942 mutex_unlock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2945 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2951 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) in btrfs_dec_block_group_ro() argument
2953 struct btrfs_space_info *sinfo = cache->space_info; in btrfs_dec_block_group_ro()
2956 BUG_ON(!cache->ro); in btrfs_dec_block_group_ro()
2958 spin_lock(&sinfo->lock); in btrfs_dec_block_group_ro()
2959 spin_lock(&cache->lock); in btrfs_dec_block_group_ro()
2960 if (!--cache->ro) { in btrfs_dec_block_group_ro()
2961 if (btrfs_is_zoned(cache->fs_info)) { in btrfs_dec_block_group_ro()
2963 cache->zone_unusable = in btrfs_dec_block_group_ro()
2964 (cache->alloc_offset - cache->used) + in btrfs_dec_block_group_ro()
2965 (cache->length - cache->zone_capacity); in btrfs_dec_block_group_ro()
2966 sinfo->bytes_zone_unusable += cache->zone_unusable; in btrfs_dec_block_group_ro()
2967 sinfo->bytes_readonly -= cache->zone_unusable; in btrfs_dec_block_group_ro()
2969 num_bytes = cache->length - cache->reserved - in btrfs_dec_block_group_ro()
2970 cache->pinned - cache->bytes_super - in btrfs_dec_block_group_ro()
2971 cache->zone_unusable - cache->used; in btrfs_dec_block_group_ro()
2972 sinfo->bytes_readonly -= num_bytes; in btrfs_dec_block_group_ro()
2973 list_del_init(&cache->ro_list); in btrfs_dec_block_group_ro()
2975 spin_unlock(&cache->lock); in btrfs_dec_block_group_ro()
2976 spin_unlock(&sinfo->lock); in btrfs_dec_block_group_ro()
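/*
 * Worked example of the zoned computation above (hypothetical numbers):
 * for a 256MiB block group with a 224MiB zone capacity, alloc_offset at
 * 100MiB and 80MiB used:
 *
 *	zone_unusable = (100MiB - 80MiB) + (256MiB - 224MiB) = 52MiB
 *
 * i.e. bytes written and later freed (unusable until a zone reset) plus
 * the tail of the zone beyond its usable capacity.
 */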
2981 struct btrfs_block_group *cache) in update_block_group_item() argument
2983 struct btrfs_fs_info *fs_info = trans->fs_info; in update_block_group_item()
2994 * Block group item updates can be triggered outside of the commit transaction in update_block_group_item()
2996 * We cannot use cache->used directly outside of the spin lock, as it in update_block_group_item()
2999 spin_lock(&cache->lock); in update_block_group_item()
3000 old_commit_used = cache->commit_used; in update_block_group_item()
3001 used = cache->used; in update_block_group_item()
3003 if (cache->commit_used == used) { in update_block_group_item()
3004 spin_unlock(&cache->lock); in update_block_group_item()
3007 cache->commit_used = used; in update_block_group_item()
3008 spin_unlock(&cache->lock); in update_block_group_item()
3010 key.objectid = cache->start; in update_block_group_item()
3012 key.offset = cache->length; in update_block_group_item()
3017 ret = -ENOENT; in update_block_group_item()
3021 leaf = path->nodes[0]; in update_block_group_item()
3022 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); in update_block_group_item()
3025 cache->global_root_id); in update_block_group_item()
3026 btrfs_set_stack_block_group_flags(&bgi, cache->flags); in update_block_group_item()
3032 * We didn't update the block group item, need to revert commit_used in update_block_group_item()
3033 * unless the block group item didn't exist yet - this is to prevent a in update_block_group_item()
3034 * race with a concurrent insertion of the block group item, with in update_block_group_item()
3037 * insertion set it to a value greater than 0 - if the block group later in update_block_group_item()
3040 if (ret < 0 && ret != -ENOENT) { in update_block_group_item()
3041 spin_lock(&cache->lock); in update_block_group_item()
3042 cache->commit_used = old_commit_used; in update_block_group_item()
3043 spin_unlock(&cache->lock); in update_block_group_item()
3053 struct btrfs_fs_info *fs_info = block_group->fs_info; in cache_save_setup()
3054 struct btrfs_root *root = fs_info->tree_root; in cache_save_setup()
3067 * If this block group is smaller than 100 megs, don't bother caching the in cache_save_setup()
3068 * block group. in cache_save_setup()
3070 if (block_group->length < (100 * SZ_1M)) { in cache_save_setup()
3071 spin_lock(&block_group->lock); in cache_save_setup()
3072 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in cache_save_setup()
3073 spin_unlock(&block_group->lock); in cache_save_setup()
3081 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { in cache_save_setup()
3091 if (block_group->ro) in cache_save_setup()
3102 * from here on out we know not to trust this cache when we load up next in cache_save_setup()
3105 BTRFS_I(inode)->generation = 0; in cache_save_setup()
3110 * super cache generation to 0 so we know to invalidate the in cache_save_setup()
3111 * cache, but then we'd have to keep track of the block groups in cache_save_setup()
3112 * that fail this way so we know we _have_ to reset this cache in cache_save_setup()
3113 * before the next commit or risk reading stale cache. So to in cache_save_setup()
3124 if (block_group->cache_generation == trans->transid && in cache_save_setup()
3132 &fs_info->global_block_rsv); in cache_save_setup()
3141 spin_lock(&block_group->lock); in cache_save_setup()
3142 if (block_group->cached != BTRFS_CACHE_FINISHED || in cache_save_setup()
3151 spin_unlock(&block_group->lock); in cache_save_setup()
3154 spin_unlock(&block_group->lock); in cache_save_setup()
3157 * We hit an ENOSPC when setting up the cache in this transaction, so just in cache_save_setup()
3158 * skip doing the setup; we've already cleared the cache, so we're safe. in cache_save_setup()
3160 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { in cache_save_setup()
3161 ret = -ENOSPC; in cache_save_setup()
3166 * Try to preallocate enough space based on how big the block group is. in cache_save_setup()
3169 * cache. in cache_save_setup()
3171 cache_size = div_u64(block_group->length, SZ_256M); in cache_save_setup()
3176 cache_size *= fs_info->sectorsize; in cache_save_setup()
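	/*
	 * Worked sizing example (illustrative; an intermediate scaling
	 * step is elided in this listing): a 1GiB block group yields
	 * div_u64(SZ_1G, SZ_256M) = 4 base units, which after the elided
	 * scaling and the multiplication by fs_info->sectorsize above
	 * becomes the byte count to preallocate for the cache inode.
	 */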
3187 * Our cache requires contiguous chunks so that we don't modify a bunch in cache_save_setup()
3188 * of metadata or split extents when writing the cache out, which means in cache_save_setup()
3191 * other block groups for this transaction, maybe we'll unpin enough in cache_save_setup()
3196 else if (ret == -ENOSPC) in cache_save_setup()
3197 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); in cache_save_setup()
3204 spin_lock(&block_group->lock); in cache_save_setup()
3206 block_group->cache_generation = trans->transid; in cache_save_setup()
3207 block_group->disk_cache_state = dcs; in cache_save_setup()
3208 spin_unlock(&block_group->lock); in cache_save_setup()
3216 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_setup_space_cache()
3217 struct btrfs_block_group *cache, *tmp; in btrfs_setup_space_cache() local
3218 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_setup_space_cache()
3221 if (list_empty(&cur_trans->dirty_bgs) || in btrfs_setup_space_cache()
3227 return -ENOMEM; in btrfs_setup_space_cache()
3229 /* Could add new block groups, use _safe just in case */ in btrfs_setup_space_cache()
3230 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, in btrfs_setup_space_cache()
3232 if (cache->disk_cache_state == BTRFS_DC_CLEAR) in btrfs_setup_space_cache()
3233 cache_save_setup(cache, trans, path); in btrfs_setup_space_cache()
3241 * Transaction commit does final block group cache writeback during a critical
3243 * order for the cache to actually match the block group, but can introduce a
3246 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3247 * There's a chance we'll have to redo some of it if the block group changes
3249 * getting rid of the easy block groups while we're still allowing others to
3254 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_start_dirty_block_groups()
3255 struct btrfs_block_group *cache; in btrfs_start_dirty_block_groups() local
3256 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_start_dirty_block_groups()
3261 struct list_head *io = &cur_trans->io_bgs; in btrfs_start_dirty_block_groups()
3264 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3265 if (list_empty(&cur_trans->dirty_bgs)) { in btrfs_start_dirty_block_groups()
3266 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3269 list_splice_init(&cur_trans->dirty_bgs, &dirty); in btrfs_start_dirty_block_groups()
3270 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3273 /* Make sure all the block groups on our dirty list actually exist */ in btrfs_start_dirty_block_groups()
3279 ret = -ENOMEM; in btrfs_start_dirty_block_groups()
3286 * removal of empty block groups deleting this block group while we are in btrfs_start_dirty_block_groups()
3287 * writing out the cache in btrfs_start_dirty_block_groups()
3289 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3293 cache = list_first_entry(&dirty, struct btrfs_block_group, in btrfs_start_dirty_block_groups()
3296 * This can happen if something re-dirties a block group that in btrfs_start_dirty_block_groups()
3300 if (!list_empty(&cache->io_list)) { in btrfs_start_dirty_block_groups()
3301 list_del_init(&cache->io_list); in btrfs_start_dirty_block_groups()
3302 btrfs_wait_cache_io(trans, cache, path); in btrfs_start_dirty_block_groups()
3303 btrfs_put_block_group(cache); in btrfs_start_dirty_block_groups()
3308 * btrfs_wait_cache_io uses the cache->dirty_list to decide if in btrfs_start_dirty_block_groups()
3315 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3316 list_del_init(&cache->dirty_list); in btrfs_start_dirty_block_groups()
3317 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3321 cache_save_setup(cache, trans, path); in btrfs_start_dirty_block_groups()
3323 if (cache->disk_cache_state == BTRFS_DC_SETUP) { in btrfs_start_dirty_block_groups()
3324 cache->io_ctl.inode = NULL; in btrfs_start_dirty_block_groups()
3325 ret = btrfs_write_out_cache(trans, cache, path); in btrfs_start_dirty_block_groups()
3326 if (ret == 0 && cache->io_ctl.inode) { in btrfs_start_dirty_block_groups()
3334 list_add_tail(&cache->io_list, io); in btrfs_start_dirty_block_groups()
3337 * If we failed to write the cache, the in btrfs_start_dirty_block_groups()
3344 ret = update_block_group_item(trans, path, cache); in btrfs_start_dirty_block_groups()
3346 * Our block group might still be attached to the list in btrfs_start_dirty_block_groups()
3347 * of new block groups in the transaction handle of some in btrfs_start_dirty_block_groups()
3348 * other task (struct btrfs_trans_handle->new_bgs). This in btrfs_start_dirty_block_groups()
3349 * means its block group item isn't yet in the extent in btrfs_start_dirty_block_groups()
3354 if (ret == -ENOENT) { in btrfs_start_dirty_block_groups()
3356 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3357 if (list_empty(&cache->dirty_list)) { in btrfs_start_dirty_block_groups()
3358 list_add_tail(&cache->dirty_list, in btrfs_start_dirty_block_groups()
3359 &cur_trans->dirty_bgs); in btrfs_start_dirty_block_groups()
3360 btrfs_get_block_group(cache); in btrfs_start_dirty_block_groups()
3363 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3369 /* If it's not on the io list, we need to put the block group */ in btrfs_start_dirty_block_groups()
3371 btrfs_put_block_group(cache); in btrfs_start_dirty_block_groups()
3376 * us from writing caches for block groups that are going to be in btrfs_start_dirty_block_groups()
3379 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3382 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3384 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
3394 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3395 list_splice_init(&cur_trans->dirty_bgs, &dirty); in btrfs_start_dirty_block_groups()
3397 * dirty_bgs_lock protects us from concurrent block group in btrfs_start_dirty_block_groups()
3401 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3404 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3408 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3409 list_splice_init(&dirty, &cur_trans->dirty_bgs); in btrfs_start_dirty_block_groups()
3410 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
3420 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_dirty_block_groups()
3421 struct btrfs_block_group *cache; in btrfs_write_dirty_block_groups() local
3422 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_write_dirty_block_groups()
3426 struct list_head *io = &cur_trans->io_bgs; in btrfs_write_dirty_block_groups()
3430 return -ENOMEM; in btrfs_write_dirty_block_groups()
3435 * transaction's list of dirty block groups. These tasks correspond to in btrfs_write_dirty_block_groups()
3437 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can in btrfs_write_dirty_block_groups()
3438 * allocate new block groups as a result of COWing nodes of the root in btrfs_write_dirty_block_groups()
3447 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3448 while (!list_empty(&cur_trans->dirty_bgs)) { in btrfs_write_dirty_block_groups()
3449 cache = list_first_entry(&cur_trans->dirty_bgs, in btrfs_write_dirty_block_groups()
3454 * This can happen if cache_save_setup re-dirties a block group in btrfs_write_dirty_block_groups()
3458 if (!list_empty(&cache->io_list)) { in btrfs_write_dirty_block_groups()
3459 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3460 list_del_init(&cache->io_list); in btrfs_write_dirty_block_groups()
3461 btrfs_wait_cache_io(trans, cache, path); in btrfs_write_dirty_block_groups()
3462 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
3463 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3470 list_del_init(&cache->dirty_list); in btrfs_write_dirty_block_groups()
3471 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3474 cache_save_setup(cache, trans, path); in btrfs_write_dirty_block_groups()
3478 (unsigned long) -1); in btrfs_write_dirty_block_groups()
3480 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { in btrfs_write_dirty_block_groups()
3481 cache->io_ctl.inode = NULL; in btrfs_write_dirty_block_groups()
3482 ret = btrfs_write_out_cache(trans, cache, path); in btrfs_write_dirty_block_groups()
3483 if (ret == 0 && cache->io_ctl.inode) { in btrfs_write_dirty_block_groups()
3485 list_add_tail(&cache->io_list, io); in btrfs_write_dirty_block_groups()
3488 * If we failed to write the cache, the in btrfs_write_dirty_block_groups()
3495 ret = update_block_group_item(trans, path, cache); in btrfs_write_dirty_block_groups()
3498 * created a new block group while updating a free space in btrfs_write_dirty_block_groups()
3499 * cache's inode (at inode.c:btrfs_finish_ordered_io()) in btrfs_write_dirty_block_groups()
3501 * which case the new block group is still attached to in btrfs_write_dirty_block_groups()
3503 * finished yet (no block group item in the extent tree in btrfs_write_dirty_block_groups()
3509 if (ret == -ENOENT) { in btrfs_write_dirty_block_groups()
3510 wait_event(cur_trans->writer_wait, in btrfs_write_dirty_block_groups()
3511 atomic_read(&cur_trans->num_writers) == 1); in btrfs_write_dirty_block_groups()
3512 ret = update_block_group_item(trans, path, cache); in btrfs_write_dirty_block_groups()
3518 /* If it's not on the io list, we need to put the block group */ in btrfs_write_dirty_block_groups()
3520 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
3522 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3524 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
3531 cache = list_first_entry(io, struct btrfs_block_group, in btrfs_write_dirty_block_groups()
3533 list_del_init(&cache->io_list); in btrfs_write_dirty_block_groups()
3534 btrfs_wait_cache_io(trans, cache, path); in btrfs_write_dirty_block_groups()
3535 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
3545 struct btrfs_fs_info *info = trans->fs_info; in btrfs_update_block_group()
3546 struct btrfs_block_group *cache = NULL; in btrfs_update_block_group() local
3553 /* Block accounting for super block */ in btrfs_update_block_group()
3554 spin_lock(&info->delalloc_root_lock); in btrfs_update_block_group()
3555 old_val = btrfs_super_bytes_used(info->super_copy); in btrfs_update_block_group()
3559 old_val -= num_bytes; in btrfs_update_block_group()
3560 btrfs_set_super_bytes_used(info->super_copy, old_val); in btrfs_update_block_group()
3561 spin_unlock(&info->delalloc_root_lock); in btrfs_update_block_group()
3567 cache = btrfs_lookup_block_group(info, bytenr); in btrfs_update_block_group()
3568 if (!cache) { in btrfs_update_block_group()
3569 ret = -ENOENT; in btrfs_update_block_group()
3572 space_info = cache->space_info; in btrfs_update_block_group()
3573 factor = btrfs_bg_type_to_factor(cache->flags); in btrfs_update_block_group()
3576 * If this block group has free space cache written out, we in btrfs_update_block_group()
3579 * space back to the block group, otherwise we will leak space. in btrfs_update_block_group()
3581 if (!alloc && !btrfs_block_group_done(cache)) in btrfs_update_block_group()
3582 btrfs_cache_block_group(cache, true); in btrfs_update_block_group()
3584 byte_in_group = bytenr - cache->start; in btrfs_update_block_group()
3585 WARN_ON(byte_in_group > cache->length); in btrfs_update_block_group()
3587 spin_lock(&space_info->lock); in btrfs_update_block_group()
3588 spin_lock(&cache->lock); in btrfs_update_block_group()
3591 cache->disk_cache_state < BTRFS_DC_CLEAR) in btrfs_update_block_group()
3592 cache->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_update_block_group()
3594 old_val = cache->used; in btrfs_update_block_group()
3595 num_bytes = min(total, cache->length - byte_in_group); in btrfs_update_block_group()
3598 cache->used = old_val; in btrfs_update_block_group()
3599 cache->reserved -= num_bytes; in btrfs_update_block_group()
3600 space_info->bytes_reserved -= num_bytes; in btrfs_update_block_group()
3601 space_info->bytes_used += num_bytes; in btrfs_update_block_group()
3602 space_info->disk_used += num_bytes * factor; in btrfs_update_block_group()
3603 spin_unlock(&cache->lock); in btrfs_update_block_group()
3604 spin_unlock(&space_info->lock); in btrfs_update_block_group()
3606 old_val -= num_bytes; in btrfs_update_block_group()
3607 cache->used = old_val; in btrfs_update_block_group()
3608 cache->pinned += num_bytes; in btrfs_update_block_group()
3611 space_info->bytes_used -= num_bytes; in btrfs_update_block_group()
3612 space_info->disk_used -= num_bytes * factor; in btrfs_update_block_group()
3614 reclaim = should_reclaim_block_group(cache, num_bytes); in btrfs_update_block_group()
3616 spin_unlock(&cache->lock); in btrfs_update_block_group()
3617 spin_unlock(&space_info->lock); in btrfs_update_block_group()
3619 set_extent_bit(&trans->transaction->pinned_extents, in btrfs_update_block_group()
3620 bytenr, bytenr + num_bytes - 1, in btrfs_update_block_group()
3624 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_update_block_group()
3625 if (list_empty(&cache->dirty_list)) { in btrfs_update_block_group()
3626 list_add_tail(&cache->dirty_list, in btrfs_update_block_group()
3627 &trans->transaction->dirty_bgs); in btrfs_update_block_group()
3628 trans->delayed_ref_updates++; in btrfs_update_block_group()
3629 btrfs_get_block_group(cache); in btrfs_update_block_group()
3631 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_update_block_group()
3634 * No longer have used bytes in this block group, queue it for in btrfs_update_block_group()
3635 * deletion. We do this after adding the block group to the in btrfs_update_block_group()
3637 * cache writeout. in btrfs_update_block_group()
3641 btrfs_mark_bg_unused(cache); in btrfs_update_block_group()
3643 btrfs_mark_bg_to_reclaim(cache); in btrfs_update_block_group()
3646 btrfs_put_block_group(cache); in btrfs_update_block_group()
3647 total -= num_bytes; in btrfs_update_block_group()
3651 /* Modified block groups are accounted for in the delayed_refs_rsv. */ in btrfs_update_block_group()
3659 * @cache: The cache we are manipulating
3666 * reservation and the block group has become read-only, we cannot make the in btrfs_add_reserved_bytes()
3667 * reservation and return -EAGAIN; otherwise this function always succeeds. in btrfs_add_reserved_bytes()
3669 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, in btrfs_add_reserved_bytes() argument
3673 struct btrfs_space_info *space_info = cache->space_info; in btrfs_add_reserved_bytes()
3677 spin_lock(&space_info->lock); in btrfs_add_reserved_bytes()
3678 spin_lock(&cache->lock); in btrfs_add_reserved_bytes()
3679 if (cache->ro) { in btrfs_add_reserved_bytes()
3680 ret = -EAGAIN; in btrfs_add_reserved_bytes()
3684 if (btrfs_block_group_should_use_size_class(cache)) { in btrfs_add_reserved_bytes()
3686 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); in btrfs_add_reserved_bytes()
3690 cache->reserved += num_bytes; in btrfs_add_reserved_bytes()
3691 space_info->bytes_reserved += num_bytes; in btrfs_add_reserved_bytes()
3692 trace_btrfs_space_reservation(cache->fs_info, "space_info", in btrfs_add_reserved_bytes()
3693 space_info->flags, num_bytes, 1); in btrfs_add_reserved_bytes()
3694 btrfs_space_info_update_bytes_may_use(cache->fs_info, in btrfs_add_reserved_bytes()
3695 space_info, -ram_bytes); in btrfs_add_reserved_bytes()
3697 cache->delalloc_bytes += num_bytes; in btrfs_add_reserved_bytes()
3704 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_add_reserved_bytes()
3706 spin_unlock(&cache->lock); in btrfs_add_reserved_bytes()
3707 spin_unlock(&space_info->lock); in btrfs_add_reserved_bytes()
3714 * @cache: The cache we are manipulating
3723 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, in btrfs_free_reserved_bytes() argument
3726 struct btrfs_space_info *space_info = cache->space_info; in btrfs_free_reserved_bytes()
3728 spin_lock(&space_info->lock); in btrfs_free_reserved_bytes()
3729 spin_lock(&cache->lock); in btrfs_free_reserved_bytes()
3730 if (cache->ro) in btrfs_free_reserved_bytes()
3731 space_info->bytes_readonly += num_bytes; in btrfs_free_reserved_bytes()
3732 cache->reserved -= num_bytes; in btrfs_free_reserved_bytes()
3733 space_info->bytes_reserved -= num_bytes; in btrfs_free_reserved_bytes()
3734 space_info->max_extent_size = 0; in btrfs_free_reserved_bytes()
3737 cache->delalloc_bytes -= num_bytes; in btrfs_free_reserved_bytes()
3738 spin_unlock(&cache->lock); in btrfs_free_reserved_bytes()
3740 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_free_reserved_bytes()
3741 spin_unlock(&space_info->lock); in btrfs_free_reserved_bytes()
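/*
 * Hypothetical pairing, for illustration: space taken with
 * btrfs_add_reserved_bytes() must be returned with
 * btrfs_free_reserved_bytes() if the extent allocation is later aborted,
 * so cache->reserved and space_info->bytes_reserved stay balanced:
 *
 *	ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes,
 *				       delalloc, false);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_free_reserved_bytes(cache, num_bytes, delalloc);
 */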
3746 struct list_head *head = &info->space_info; in force_metadata_allocation()
3750 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) in force_metadata_allocation()
3751 found->force_alloc = CHUNK_ALLOC_FORCE; in force_metadata_allocation()
3769 thresh = btrfs_super_total_bytes(fs_info->super_copy); in should_alloc_chunk()
3772 if (sinfo->total_bytes - bytes_used < thresh) in should_alloc_chunk()
3776 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) in should_alloc_chunk()
3783 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); in btrfs_force_chunk_alloc()
3797 * system block group if needed. in do_chunk_alloc()
3809 * Normally we are not expected to fail with -ENOSPC here, since we have in do_chunk_alloc()
3814 * existing system block groups have a profile which can not be used in do_chunk_alloc()
3821 * none of the block groups can be used for extent allocation since they in do_chunk_alloc()
3825 * block groups and check if they have a usable profile and enough space in do_chunk_alloc()
3826 * can be slow on very large filesystems, so we tolerate the -ENOSPC and in do_chunk_alloc()
3832 * block group to allocate from when we called check_system_chunk() in do_chunk_alloc()
3833 * above. However right after we called it, the only system block group in do_chunk_alloc()
3837 * handle and scrub uses the commit root to search for block groups; in do_chunk_alloc()
3839 * 3) We had one system block group with enough free space when we called in do_chunk_alloc()
3843 * block group (discard removes a free space entry, discards it, and in do_chunk_alloc()
3844 * then adds back the entry to the block group cache). in do_chunk_alloc()
3846 if (ret == -ENOSPC) { in do_chunk_alloc()
3847 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); in do_chunk_alloc()
3885 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
3886 * the chunk, the chunk mapping, create its block group and add the items
3887 * that belong in the chunk btree to it - more specifically, we need to
3890 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
3896 * trigger chunk allocation and attempted to insert the new block group item
3907 * allocate a new block group (chunk) because the only one that had enough
3909 * device replace, block group reclaim thread, etc), so we can not use it
3914 * the filesystem was mounted in degraded mode, none of the existing block
3916 * profile (for e.g. mounting a 2 devices filesystem, where all block groups
3919 * example, it will trigger allocation of a new metadata block group with a
3925 * example, it does not find any free extent in any metadata block group,
3926 * therefore forced to try to allocate a new metadata block group.
3928 * meanwhile - this typically happens with tasks that don't reserve space
3937 * the only metadata block group that had free space (discard starts by
3938 * removing a free space entry from a block group, then does the discard
3940 * block group).
3943 * a seed device - we must create new metadata and system chunks without adding
3944 * any of the block group items to the chunk, extent and device btrees. If we
3946 * btrees, since all the chunks from the seed device are read-only.
3961 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
3963 * a modification to the chunk btree - use cases for the later are adding,
3969 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
3975 * that mutex. The same logic applies to removing chunks - we must reserve system
3977 * while holding fs_info->chunk_mutex.
3982 * - return 1 if it successfully allocates a chunk,
3983 * - return errors including -ENOSPC otherwise.
3985 * - return 0 if it doesn't need to allocate a new chunk,
3986 * - return 1 if it successfully allocates a chunk,
3987 * - return errors including -ENOSPC otherwise.
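/*
 * Hypothetical caller, illustrating the convention above: with
 * CHUNK_ALLOC_NO_FORCE a return of 0 means no allocation was needed,
 * 1 means a chunk was allocated, and a negative value (including
 * -ENOSPC) is an error:
 *
 *	ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 */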
3992 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_chunk_alloc()
4005 /* Don't re-enter if we're already allocating a chunk */ in btrfs_chunk_alloc()
4006 if (trans->allocating_chunk) in btrfs_chunk_alloc()
4007 return -ENOSPC; in btrfs_chunk_alloc()
4017 * lock on it and on its parent - if the COW operation triggers a system in btrfs_chunk_alloc()
4026 * here - this happens in the cases described above at do_chunk_alloc(). in btrfs_chunk_alloc()
4030 return -ENOSPC; in btrfs_chunk_alloc()
4036 spin_lock(&space_info->lock); in btrfs_chunk_alloc()
4037 if (force < space_info->force_alloc) in btrfs_chunk_alloc()
4038 force = space_info->force_alloc; in btrfs_chunk_alloc()
4040 if (space_info->full) { in btrfs_chunk_alloc()
4043 ret = -ENOSPC; in btrfs_chunk_alloc()
4046 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4049 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4051 } else if (space_info->chunk_alloc) { in btrfs_chunk_alloc()
4053 * Someone is already allocating, so we need to block in btrfs_chunk_alloc()
4060 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4061 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4062 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4065 space_info->chunk_alloc = 1; in btrfs_chunk_alloc()
4067 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4073 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4074 trans->allocating_chunk = true; in btrfs_chunk_alloc()
4088 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { in btrfs_chunk_alloc()
4089 fs_info->data_chunk_allocations++; in btrfs_chunk_alloc()
4090 if (!(fs_info->data_chunk_allocations % in btrfs_chunk_alloc()
4091 fs_info->metadata_ratio)) in btrfs_chunk_alloc()
4096 trans->allocating_chunk = false; in btrfs_chunk_alloc()
4102 * New block group is likely to be used soon. Try to activate in btrfs_chunk_alloc()
4111 spin_lock(&space_info->lock); in btrfs_chunk_alloc()
4113 if (ret == -ENOSPC) in btrfs_chunk_alloc()
4114 space_info->full = 1; in btrfs_chunk_alloc()
4119 space_info->max_extent_size = 0; in btrfs_chunk_alloc()
4122 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in btrfs_chunk_alloc()
4124 space_info->chunk_alloc = 0; in btrfs_chunk_alloc()
4125 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
4126 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
4137 num_dev = fs_info->fs_devices->rw_devices; in get_profile_num_devs()
4146 struct btrfs_fs_info *fs_info = trans->fs_info; in reserve_chunk_space()
4153 * atomic and race-free space reservation in the chunk block reserve. in reserve_chunk_space()
4155 lockdep_assert_held(&fs_info->chunk_mutex); in reserve_chunk_space()
4158 spin_lock(&info->lock); in reserve_chunk_space()
4159 left = info->total_bytes - btrfs_space_info_used(info, true); in reserve_chunk_space()
4160 spin_unlock(&info->lock); in reserve_chunk_space()
4195 * the cases described at do_chunk_alloc() - the system in reserve_chunk_space()
4196 * block group we just created was just turned into RO in reserve_chunk_space()
4206 &fs_info->chunk_block_rsv, in reserve_chunk_space()
4209 trans->chunk_bytes_reserved += bytes; in reserve_chunk_space()
4215 * The caller must be holding fs_info->chunk_mutex.
4219 struct btrfs_fs_info *fs_info = trans->fs_info; in check_system_chunk()
4240 * block group allocation and removal, to avoid a deadlock with a concurrent
4241 * task that is allocating a metadata or data block group and therefore needs to
4249 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_reserve_chunk_metadata()
4257 mutex_lock(&fs_info->chunk_mutex); in btrfs_reserve_chunk_metadata()
4259 mutex_unlock(&fs_info->chunk_mutex); in btrfs_reserve_chunk_metadata()
4269 spin_lock(&block_group->lock); in btrfs_put_block_group_cache()
4271 &block_group->runtime_flags)) { in btrfs_put_block_group_cache()
4272 struct inode *inode = block_group->inode; in btrfs_put_block_group_cache()
4274 block_group->inode = NULL; in btrfs_put_block_group_cache()
4275 spin_unlock(&block_group->lock); in btrfs_put_block_group_cache()
4277 ASSERT(block_group->io_ctl.inode == NULL); in btrfs_put_block_group_cache()
4280 spin_unlock(&block_group->lock); in btrfs_put_block_group_cache()
4287 * Must be called only after stopping all workers, since we could have block
4289 * freed the block groups before stopping them.
4299 if (info->active_meta_bg) { in btrfs_free_block_groups()
4300 btrfs_put_block_group(info->active_meta_bg); in btrfs_free_block_groups()
4301 info->active_meta_bg = NULL; in btrfs_free_block_groups()
4303 if (info->active_system_bg) { in btrfs_free_block_groups()
4304 btrfs_put_block_group(info->active_system_bg); in btrfs_free_block_groups()
4305 info->active_system_bg = NULL; in btrfs_free_block_groups()
4309 write_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4310 while (!list_empty(&info->caching_block_groups)) { in btrfs_free_block_groups()
4311 caching_ctl = list_entry(info->caching_block_groups.next, in btrfs_free_block_groups()
4313 list_del(&caching_ctl->list); in btrfs_free_block_groups()
4316 write_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4318 spin_lock(&info->unused_bgs_lock); in btrfs_free_block_groups()
4319 while (!list_empty(&info->unused_bgs)) { in btrfs_free_block_groups()
4320 block_group = list_first_entry(&info->unused_bgs, in btrfs_free_block_groups()
4323 list_del_init(&block_group->bg_list); in btrfs_free_block_groups()
4327 while (!list_empty(&info->reclaim_bgs)) { in btrfs_free_block_groups()
4328 block_group = list_first_entry(&info->reclaim_bgs, in btrfs_free_block_groups()
4331 list_del_init(&block_group->bg_list); in btrfs_free_block_groups()
4334 spin_unlock(&info->unused_bgs_lock); in btrfs_free_block_groups()
4336 spin_lock(&info->zone_active_bgs_lock); in btrfs_free_block_groups()
4337 while (!list_empty(&info->zone_active_bgs)) { in btrfs_free_block_groups()
4338 block_group = list_first_entry(&info->zone_active_bgs, in btrfs_free_block_groups()
4341 list_del_init(&block_group->active_bg_list); in btrfs_free_block_groups()
4344 spin_unlock(&info->zone_active_bgs_lock); in btrfs_free_block_groups()
4346 write_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4347 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { in btrfs_free_block_groups()
4350 rb_erase_cached(&block_group->cache_node, in btrfs_free_block_groups()
4351 &info->block_group_cache_tree); in btrfs_free_block_groups()
4352 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_free_block_groups()
4353 write_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4355 down_write(&block_group->space_info->groups_sem); in btrfs_free_block_groups()
4356 list_del(&block_group->list); in btrfs_free_block_groups()
4357 up_write(&block_group->space_info->groups_sem); in btrfs_free_block_groups()
4360 * We haven't cached this block group, which means we could in btrfs_free_block_groups()
4361 * possibly have excluded extents on this block group. in btrfs_free_block_groups()
4363 if (block_group->cached == BTRFS_CACHE_NO || in btrfs_free_block_groups()
4364 block_group->cached == BTRFS_CACHE_ERROR) in btrfs_free_block_groups()
4368 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); in btrfs_free_block_groups()
4369 ASSERT(list_empty(&block_group->dirty_list)); in btrfs_free_block_groups()
4370 ASSERT(list_empty(&block_group->io_list)); in btrfs_free_block_groups()
4371 ASSERT(list_empty(&block_group->bg_list)); in btrfs_free_block_groups()
4372 ASSERT(refcount_read(&block_group->refs) == 1); in btrfs_free_block_groups()
4373 ASSERT(block_group->swap_extents == 0); in btrfs_free_block_groups()
4376 write_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4378 write_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
4382 while (!list_empty(&info->space_info)) { in btrfs_free_block_groups()
4383 space_info = list_entry(info->space_info.next, in btrfs_free_block_groups()
4391 if (WARN_ON(space_info->bytes_pinned > 0 || in btrfs_free_block_groups()
4392 space_info->bytes_may_use > 0)) in btrfs_free_block_groups()
4402 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_free_block_groups()
4404 if (WARN_ON(space_info->bytes_reserved > 0)) in btrfs_free_block_groups()
4408 WARN_ON(space_info->reclaim_size > 0); in btrfs_free_block_groups()
4409 list_del(&space_info->list); in btrfs_free_block_groups()
4415 void btrfs_freeze_block_group(struct btrfs_block_group *cache) in btrfs_freeze_block_group() argument
4417 atomic_inc(&cache->frozen); in btrfs_freeze_block_group()
4422 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_unfreeze_block_group()
4427 spin_lock(&block_group->lock); in btrfs_unfreeze_block_group()
4428 cleanup = (atomic_dec_and_test(&block_group->frozen) && in btrfs_unfreeze_block_group()
4429 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); in btrfs_unfreeze_block_group()
4430 spin_unlock(&block_group->lock); in btrfs_unfreeze_block_group()
4433 em_tree = &fs_info->mapping_tree; in btrfs_unfreeze_block_group()
4434 write_lock(&em_tree->lock); in btrfs_unfreeze_block_group()
4435 em = lookup_extent_mapping(em_tree, block_group->start, in btrfs_unfreeze_block_group()
4439 write_unlock(&em_tree->lock); in btrfs_unfreeze_block_group()
4447 * tasks trimming this block group may have left one entry each. in btrfs_unfreeze_block_group()
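/*
 * Hypothetical usage, for illustration: freezing pins the block group's
 * chunk mapping so a concurrent removal cannot free it, and unfreezing
 * performs the deferred cleanup if the group was removed meanwhile:
 *
 *	btrfs_freeze_block_group(bg);
 *	... trim or inspect the block group's free space ...
 *	btrfs_unfreeze_block_group(bg);
 */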
4458 spin_lock(&bg->lock); in btrfs_inc_block_group_swap_extents()
4459 if (bg->ro) in btrfs_inc_block_group_swap_extents()
4462 bg->swap_extents++; in btrfs_inc_block_group_swap_extents()
4463 spin_unlock(&bg->lock); in btrfs_inc_block_group_swap_extents()
4470 spin_lock(&bg->lock); in btrfs_dec_block_group_swap_extents()
4471 ASSERT(!bg->ro); in btrfs_dec_block_group_swap_extents()
4472 ASSERT(bg->swap_extents >= amount); in btrfs_dec_block_group_swap_extents()
4473 bg->swap_extents -= amount; in btrfs_dec_block_group_swap_extents()
4474 spin_unlock(&bg->lock); in btrfs_dec_block_group_swap_extents()
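/*
 * Hypothetical swapfile activation pattern, for illustration (bg and
 * nr_extents are hypothetical caller state):
 * btrfs_inc_block_group_swap_extents() fails on a read-only block group,
 * and each successful increment is balanced at swapoff time:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		return -ETXTBSY;
 *	...
 *	btrfs_dec_block_group_swap_extents(bg, nr_extents);
 */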
4487 * Handle a block group allocating an extent in a size class
4489 * @bg: The block group we allocated in.
4494 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
4498 * find_free_extent will skip block groups with a mismatched size class until
4500 * force_wrong_size_class. However, if a block group is newly allocated and
4512 if (bg->size_class == size_class) in btrfs_use_block_group_size_class()
4524 if (bg->size_class != BTRFS_BG_SZ_NONE) { in btrfs_use_block_group_size_class()
4527 return -EAGAIN; in btrfs_use_block_group_size_class()
4530 * The happy new block group case: the new allocation is the first in btrfs_use_block_group_size_class()
4533 bg->size_class = size_class; in btrfs_use_block_group_size_class()
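/*
 * Hypothetical allocator loop fragment, illustrating the -EAGAIN
 * convention above: a mismatched size class sends the caller on to the
 * next candidate block group unless the mismatch is forced:
 *
 *	ret = btrfs_use_block_group_size_class(bg, size_class,
 *					       force_wrong_size_class);
 *	if (ret == -EAGAIN)
 *		continue;
 */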
4540 if (btrfs_is_zoned(bg->fs_info)) in btrfs_block_group_should_use_size_class()