/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated.  This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
};
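
/*
 * Illustrative sketch only, not part of the btrfs API: a caller holding a
 * transaction handle that wants a new data chunk only when one is actually
 * needed would pass CHUNK_ALLOC_NO_FORCE to btrfs_chunk_alloc() (declared
 * later in this header), e.g.:
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0 && ret != -ENOSPC)
 *		btrfs_abort_transaction(trans, ret);
 *
 * The error handling above is illustrative; real call sites decide for
 * themselves whether -ENOSPC is fatal.
 */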

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
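
/*
 * A minimal usage sketch, assuming only the helpers declared later in this
 * header: a waiter takes a reference on the caching control, sleeps until
 * caching has finished, then drops the reference:
 *
 *	struct btrfs_caching_control *ctl;
 *
 *	ctl = btrfs_get_caching_control(cache);
 *	if (ctl) {
 *		wait_event(ctl->wait, btrfs_block_group_cache_done(cache));
 *		btrfs_put_caching_control(ctl);
 *	}
 */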

struct btrfs_block_group_cache {
	struct btrfs_key key;
	struct btrfs_block_group_item item;
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 pinned;
	u64 reserved;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for delayed data space allocation, because only data
	 * space allocation and the related metadata updates can cross a
	 * transaction boundary.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	/* Usage count */
	atomic_t count;

	/*
	 * List of struct btrfs_free_cluster for this block group. Today it
	 * will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	atomic_t trimming;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented, while holding the spinlock *lock*, by a task checking
	 * if it can perform a nocow write (incremented only if the value of
	 * the *ro* field is 0). Decremented by such a task once it creates an
	 * ordered extent, or earlier if an error happens before reaching that
	 * step. This prevents races between block group relocation and nocow
	 * writes through direct IO (see the usage sketch after this struct).
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
};
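
/*
 * Usage sketch for the nocow_writers gate (illustrative only, using just the
 * helpers declared later in this header): a task bumps the counter before a
 * nocow write, which blocks relocation of the block group until the ordered
 * extent exists:
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the nocow write and create the ordered extent ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... fall back to a COW write ...
 *	}
 */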

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
		struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group_cache *btrfs_next_block_group(
		struct btrfs_block_group_cache *cache);
void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache);
int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group_cache *cache);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
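
/*
 * Refcounting sketch for the lookup helpers above (illustrative only): a
 * lookup returns the block group with its usage count elevated, and the
 * caller is expected to drop that reference when done:
 *
 *	struct btrfs_block_group_cache *cache;
 *
 *	cache = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (cache) {
 *		... use the block group ...
 *		btrfs_put_block_group(cache);
 *	}
 */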

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

/* Did the caching of this block group finish, successfully or with error? */
static inline int btrfs_block_group_cache_done(
		struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

#endif /* BTRFS_BLOCK_GROUP_H */