1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Facebook.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/sched/mm.h>
8 #include "ctree.h"
9 #include "disk-io.h"
10 #include "locking.h"
11 #include "free-space-tree.h"
12 #include "transaction.h"
13 #include "block-group.h"
14 
15 static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
16 					struct btrfs_block_group_cache *block_group,
17 					struct btrfs_path *path);
18 
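/*
 * Decide how many free space extents a block group can have before we switch
 * its free space to the bitmap format (bitmap_high_thresh) and how few it
 * must drop to before we switch back to extents (bitmap_low_thresh).
 */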
19 void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
20 {
21 	u32 bitmap_range;
22 	size_t bitmap_size;
23 	u64 num_bitmaps, total_bitmap_size;
24 
25 	/*
26 	 * We convert to bitmaps when the disk space required for using extents
27 	 * exceeds that required for using bitmaps.
28 	 */
29 	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
30 	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
31 			      bitmap_range);
32 	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
33 	total_bitmap_size = num_bitmaps * bitmap_size;
34 	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
35 					    sizeof(struct btrfs_item));
36 
37 	/*
38 	 * We allow for a small buffer between the high threshold and low
39 	 * threshold to avoid thrashing back and forth between the two formats.
40 	 */
41 	if (cache->bitmap_high_thresh > 100)
42 		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
43 	else
44 		cache->bitmap_low_thresh = 0;
45 }
46 
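/*
 * Insert an empty FREE_SPACE_INFO item (zero extents, no flags) for the given
 * block group; the free space itself is added by the callers afterwards.
 */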
47 static int add_new_free_space_info(struct btrfs_trans_handle *trans,
48 				   struct btrfs_block_group_cache *block_group,
49 				   struct btrfs_path *path)
50 {
51 	struct btrfs_root *root = trans->fs_info->free_space_root;
52 	struct btrfs_free_space_info *info;
53 	struct btrfs_key key;
54 	struct extent_buffer *leaf;
55 	int ret;
56 
57 	key.objectid = block_group->key.objectid;
58 	key.type = BTRFS_FREE_SPACE_INFO_KEY;
59 	key.offset = block_group->key.offset;
60 
61 	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
62 	if (ret)
63 		goto out;
64 
65 	leaf = path->nodes[0];
66 	info = btrfs_item_ptr(leaf, path->slots[0],
67 			      struct btrfs_free_space_info);
68 	btrfs_set_free_space_extent_count(leaf, info, 0);
69 	btrfs_set_free_space_flags(leaf, info, 0);
70 	btrfs_mark_buffer_dirty(leaf);
71 
72 	ret = 0;
73 out:
74 	btrfs_release_path(path);
75 	return ret;
76 }
77 
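/*
 * Find the FREE_SPACE_INFO item for a block group and return a pointer into
 * the leaf held by @path, or an ERR_PTR() if the item is missing.
 */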
78 EXPORT_FOR_TESTS
79 struct btrfs_free_space_info *search_free_space_info(
80 		struct btrfs_trans_handle *trans,
81 		struct btrfs_block_group_cache *block_group,
82 		struct btrfs_path *path, int cow)
83 {
84 	struct btrfs_fs_info *fs_info = block_group->fs_info;
85 	struct btrfs_root *root = fs_info->free_space_root;
86 	struct btrfs_key key;
87 	int ret;
88 
89 	key.objectid = block_group->key.objectid;
90 	key.type = BTRFS_FREE_SPACE_INFO_KEY;
91 	key.offset = block_group->key.offset;
92 
93 	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
94 	if (ret < 0)
95 		return ERR_PTR(ret);
96 	if (ret != 0) {
97 		btrfs_warn(fs_info, "missing free space info for %llu",
98 			   block_group->key.objectid);
99 		ASSERT(0);
100 		return ERR_PTR(-ENOENT);
101 	}
102 
103 	return btrfs_item_ptr(path->nodes[0], path->slots[0],
104 			      struct btrfs_free_space_info);
105 }
106 
107 /*
108  * btrfs_search_slot() but we're looking for the greatest key less than the
109  * passed key.
110  */
111 static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
112 				  struct btrfs_root *root,
113 				  struct btrfs_key *key, struct btrfs_path *p,
114 				  int ins_len, int cow)
115 {
116 	int ret;
117 
118 	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
119 	if (ret < 0)
120 		return ret;
121 
122 	if (ret == 0) {
123 		ASSERT(0);
124 		return -EIO;
125 	}
126 
127 	if (p->slots[0] == 0) {
128 		ASSERT(0);
129 		return -EIO;
130 	}
131 	p->slots[0]--;
132 
133 	return 0;
134 }
135 
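/* Bytes needed for a bitmap covering @size bytes at one bit per sector. */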
136 static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
137 {
138 	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
139 }
140 
141 static unsigned long *alloc_bitmap(u32 bitmap_size)
142 {
143 	unsigned long *ret;
144 	unsigned int nofs_flag;
145 	u32 bitmap_rounded_size = round_up(bitmap_size, sizeof(unsigned long));
146 
147 	/*
148 	 * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse
149 	 * into the filesystem as the free space bitmap can be modified in the
150 	 * critical section of a transaction commit.
151 	 *
152 	 * TODO: push the memalloc_nofs_{save,restore}() to the caller where we
153 	 * know that recursion is unsafe.
154 	 */
155 	nofs_flag = memalloc_nofs_save();
156 	ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL);
157 	memalloc_nofs_restore(nofs_flag);
158 	return ret;
159 }
160 
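/* Set @len bits starting at @start in a byte-addressed (little-endian) bitmap. */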
161 static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
162 {
163 	u8 *p = ((u8 *)map) + BIT_BYTE(start);
164 	const unsigned int size = start + len;
165 	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
166 	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
167 
168 	while (len - bits_to_set >= 0) {
169 		*p |= mask_to_set;
170 		len -= bits_to_set;
171 		bits_to_set = BITS_PER_BYTE;
172 		mask_to_set = ~0;
173 		p++;
174 	}
175 	if (len) {
176 		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
177 		*p |= mask_to_set;
178 	}
179 }
180 
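/*
 * Convert a block group's free space from FREE_SPACE_EXTENT items to bitmaps:
 * gather the existing extents into an in-memory bitmap, delete the extent
 * items, set BTRFS_FREE_SPACE_USING_BITMAPS, and write the bitmap back out as
 * one FREE_SPACE_BITMAP item per bitmap-sized range of the block group.
 */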
181 EXPORT_FOR_TESTS
182 int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
183 				  struct btrfs_block_group_cache *block_group,
184 				  struct btrfs_path *path)
185 {
186 	struct btrfs_fs_info *fs_info = trans->fs_info;
187 	struct btrfs_root *root = fs_info->free_space_root;
188 	struct btrfs_free_space_info *info;
189 	struct btrfs_key key, found_key;
190 	struct extent_buffer *leaf;
191 	unsigned long *bitmap;
192 	char *bitmap_cursor;
193 	u64 start, end;
194 	u64 bitmap_range, i;
195 	u32 bitmap_size, flags, expected_extent_count;
196 	u32 extent_count = 0;
197 	int done = 0, nr;
198 	int ret;
199 
200 	bitmap_size = free_space_bitmap_size(block_group->key.offset,
201 					     fs_info->sectorsize);
202 	bitmap = alloc_bitmap(bitmap_size);
203 	if (!bitmap) {
204 		ret = -ENOMEM;
205 		goto out;
206 	}
207 
208 	start = block_group->key.objectid;
209 	end = block_group->key.objectid + block_group->key.offset;
210 
211 	key.objectid = end - 1;
212 	key.type = (u8)-1;
213 	key.offset = (u64)-1;
214 
215 	while (!done) {
216 		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
217 		if (ret)
218 			goto out;
219 
220 		leaf = path->nodes[0];
221 		nr = 0;
222 		path->slots[0]++;
223 		while (path->slots[0] > 0) {
224 			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
225 
226 			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
227 				ASSERT(found_key.objectid == block_group->key.objectid);
228 				ASSERT(found_key.offset == block_group->key.offset);
229 				done = 1;
230 				break;
231 			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
232 				u64 first, last;
233 
234 				ASSERT(found_key.objectid >= start);
235 				ASSERT(found_key.objectid < end);
236 				ASSERT(found_key.objectid + found_key.offset <= end);
237 
238 				first = div_u64(found_key.objectid - start,
239 						fs_info->sectorsize);
240 				last = div_u64(found_key.objectid + found_key.offset - start,
241 					       fs_info->sectorsize);
242 				le_bitmap_set(bitmap, first, last - first);
243 
244 				extent_count++;
245 				nr++;
246 				path->slots[0]--;
247 			} else {
248 				ASSERT(0);
249 			}
250 		}
251 
252 		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
253 		if (ret)
254 			goto out;
255 		btrfs_release_path(path);
256 	}
257 
258 	info = search_free_space_info(trans, block_group, path, 1);
259 	if (IS_ERR(info)) {
260 		ret = PTR_ERR(info);
261 		goto out;
262 	}
263 	leaf = path->nodes[0];
264 	flags = btrfs_free_space_flags(leaf, info);
265 	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
266 	btrfs_set_free_space_flags(leaf, info, flags);
267 	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
268 	btrfs_mark_buffer_dirty(leaf);
269 	btrfs_release_path(path);
270 
271 	if (extent_count != expected_extent_count) {
272 		btrfs_err(fs_info,
273 			  "incorrect extent count for %llu; counted %u, expected %u",
274 			  block_group->key.objectid, extent_count,
275 			  expected_extent_count);
276 		ASSERT(0);
277 		ret = -EIO;
278 		goto out;
279 	}
280 
281 	bitmap_cursor = (char *)bitmap;
282 	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
283 	i = start;
284 	while (i < end) {
285 		unsigned long ptr;
286 		u64 extent_size;
287 		u32 data_size;
288 
289 		extent_size = min(end - i, bitmap_range);
290 		data_size = free_space_bitmap_size(extent_size,
291 						   fs_info->sectorsize);
292 
293 		key.objectid = i;
294 		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
295 		key.offset = extent_size;
296 
297 		ret = btrfs_insert_empty_item(trans, root, path, &key,
298 					      data_size);
299 		if (ret)
300 			goto out;
301 
302 		leaf = path->nodes[0];
303 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
304 		write_extent_buffer(leaf, bitmap_cursor, ptr,
305 				    data_size);
306 		btrfs_mark_buffer_dirty(leaf);
307 		btrfs_release_path(path);
308 
309 		i += extent_size;
310 		bitmap_cursor += data_size;
311 	}
312 
313 	ret = 0;
314 out:
315 	kvfree(bitmap);
316 	if (ret)
317 		btrfs_abort_transaction(trans, ret);
318 	return ret;
319 }
320 
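/*
 * The inverse of convert_free_space_to_bitmaps(): read the bitmap items into
 * memory, delete them, clear BTRFS_FREE_SPACE_USING_BITMAPS, and insert one
 * FREE_SPACE_EXTENT item for each run of set bits.
 */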
321 EXPORT_FOR_TESTS
322 int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
323 				  struct btrfs_block_group_cache *block_group,
324 				  struct btrfs_path *path)
325 {
326 	struct btrfs_fs_info *fs_info = trans->fs_info;
327 	struct btrfs_root *root = fs_info->free_space_root;
328 	struct btrfs_free_space_info *info;
329 	struct btrfs_key key, found_key;
330 	struct extent_buffer *leaf;
331 	unsigned long *bitmap;
332 	u64 start, end;
333 	u32 bitmap_size, flags, expected_extent_count;
334 	unsigned long nrbits, start_bit, end_bit;
335 	u32 extent_count = 0;
336 	int done = 0, nr;
337 	int ret;
338 
339 	bitmap_size = free_space_bitmap_size(block_group->key.offset,
340 					     fs_info->sectorsize);
341 	bitmap = alloc_bitmap(bitmap_size);
342 	if (!bitmap) {
343 		ret = -ENOMEM;
344 		goto out;
345 	}
346 
347 	start = block_group->key.objectid;
348 	end = block_group->key.objectid + block_group->key.offset;
349 
350 	key.objectid = end - 1;
351 	key.type = (u8)-1;
352 	key.offset = (u64)-1;
353 
354 	while (!done) {
355 		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
356 		if (ret)
357 			goto out;
358 
359 		leaf = path->nodes[0];
360 		nr = 0;
361 		path->slots[0]++;
362 		while (path->slots[0] > 0) {
363 			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
364 
365 			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
366 				ASSERT(found_key.objectid == block_group->key.objectid);
367 				ASSERT(found_key.offset == block_group->key.offset);
368 				done = 1;
369 				break;
370 			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
371 				unsigned long ptr;
372 				char *bitmap_cursor;
373 				u32 bitmap_pos, data_size;
374 
375 				ASSERT(found_key.objectid >= start);
376 				ASSERT(found_key.objectid < end);
377 				ASSERT(found_key.objectid + found_key.offset <= end);
378 
379 				bitmap_pos = div_u64(found_key.objectid - start,
380 						     fs_info->sectorsize *
381 						     BITS_PER_BYTE);
382 				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
383 				data_size = free_space_bitmap_size(found_key.offset,
384 								   fs_info->sectorsize);
385 
386 				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
387 				read_extent_buffer(leaf, bitmap_cursor, ptr,
388 						   data_size);
389 
390 				nr++;
391 				path->slots[0]--;
392 			} else {
393 				ASSERT(0);
394 			}
395 		}
396 
397 		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
398 		if (ret)
399 			goto out;
400 		btrfs_release_path(path);
401 	}
402 
403 	info = search_free_space_info(trans, block_group, path, 1);
404 	if (IS_ERR(info)) {
405 		ret = PTR_ERR(info);
406 		goto out;
407 	}
408 	leaf = path->nodes[0];
409 	flags = btrfs_free_space_flags(leaf, info);
410 	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
411 	btrfs_set_free_space_flags(leaf, info, flags);
412 	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
413 	btrfs_mark_buffer_dirty(leaf);
414 	btrfs_release_path(path);
415 
416 	nrbits = div_u64(block_group->key.offset, block_group->fs_info->sectorsize);
417 	start_bit = find_next_bit_le(bitmap, nrbits, 0);
418 
419 	while (start_bit < nrbits) {
420 		end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
421 		ASSERT(start_bit < end_bit);
422 
423 		key.objectid = start + start_bit * block_group->fs_info->sectorsize;
424 		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
425 		key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize;
426 
427 		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
428 		if (ret)
429 			goto out;
430 		btrfs_release_path(path);
431 
432 		extent_count++;
433 
434 		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
435 	}
436 
437 	if (extent_count != expected_extent_count) {
438 		btrfs_err(fs_info,
439 			  "incorrect extent count for %llu; counted %u, expected %u",
440 			  block_group->key.objectid, extent_count,
441 			  expected_extent_count);
442 		ASSERT(0);
443 		ret = -EIO;
444 		goto out;
445 	}
446 
447 	ret = 0;
448 out:
449 	kvfree(bitmap);
450 	if (ret)
451 		btrfs_abort_transaction(trans, ret);
452 	return ret;
453 }
454 
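/*
 * Apply a delta to the recorded free space extent count and convert the block
 * group between the extent and bitmap formats if the new count crosses one of
 * the thresholds computed in set_free_space_tree_thresholds().
 */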
455 static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
456 					  struct btrfs_block_group_cache *block_group,
457 					  struct btrfs_path *path,
458 					  int new_extents)
459 {
460 	struct btrfs_free_space_info *info;
461 	u32 flags;
462 	u32 extent_count;
463 	int ret = 0;
464 
465 	if (new_extents == 0)
466 		return 0;
467 
468 	info = search_free_space_info(trans, block_group, path, 1);
469 	if (IS_ERR(info)) {
470 		ret = PTR_ERR(info);
471 		goto out;
472 	}
473 	flags = btrfs_free_space_flags(path->nodes[0], info);
474 	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
475 
476 	extent_count += new_extents;
477 	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
478 	btrfs_mark_buffer_dirty(path->nodes[0]);
479 	btrfs_release_path(path);
480 
481 	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
482 	    extent_count > block_group->bitmap_high_thresh) {
483 		ret = convert_free_space_to_bitmaps(trans, block_group, path);
484 	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
485 		   extent_count < block_group->bitmap_low_thresh) {
486 		ret = convert_free_space_to_extents(trans, block_group, path);
487 	}
488 
489 out:
490 	return ret;
491 }
492 
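/*
 * Test the bit for @offset in the FREE_SPACE_BITMAP item that @path currently
 * points to; the item must cover @offset.
 */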
493 EXPORT_FOR_TESTS
494 int free_space_test_bit(struct btrfs_block_group_cache *block_group,
495 			struct btrfs_path *path, u64 offset)
496 {
497 	struct extent_buffer *leaf;
498 	struct btrfs_key key;
499 	u64 found_start, found_end;
500 	unsigned long ptr, i;
501 
502 	leaf = path->nodes[0];
503 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
504 	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
505 
506 	found_start = key.objectid;
507 	found_end = key.objectid + key.offset;
508 	ASSERT(offset >= found_start && offset < found_end);
509 
510 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
511 	i = div_u64(offset - found_start,
512 		    block_group->fs_info->sectorsize);
513 	return !!extent_buffer_test_bit(leaf, ptr, i);
514 }
515 
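/*
 * Set or clear the bits for [*start, *start + *size) within the bitmap item
 * that @path points to, clamped to that item's range, then advance *start and
 * shrink *size past the portion that was handled.
 */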
516 static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
517 				struct btrfs_path *path, u64 *start, u64 *size,
518 				int bit)
519 {
520 	struct btrfs_fs_info *fs_info = block_group->fs_info;
521 	struct extent_buffer *leaf;
522 	struct btrfs_key key;
523 	u64 end = *start + *size;
524 	u64 found_start, found_end;
525 	unsigned long ptr, first, last;
526 
527 	leaf = path->nodes[0];
528 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
529 	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
530 
531 	found_start = key.objectid;
532 	found_end = key.objectid + key.offset;
533 	ASSERT(*start >= found_start && *start < found_end);
534 	ASSERT(end > found_start);
535 
536 	if (end > found_end)
537 		end = found_end;
538 
539 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
540 	first = div_u64(*start - found_start, fs_info->sectorsize);
541 	last = div_u64(end - found_start, fs_info->sectorsize);
542 	if (bit)
543 		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
544 	else
545 		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
546 	btrfs_mark_buffer_dirty(leaf);
547 
548 	*size -= end - *start;
549 	*start = end;
550 }
551 
552 /*
553  * We can't use btrfs_next_item() in modify_free_space_bitmap() because
554  * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
555  * tree walking in btrfs_next_leaf() anyway because we know exactly what we're
556  * looking for.
557  */
558 static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
559 				  struct btrfs_root *root, struct btrfs_path *p)
560 {
561 	struct btrfs_key key;
562 
563 	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
564 		p->slots[0]++;
565 		return 0;
566 	}
567 
568 	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
569 	btrfs_release_path(p);
570 
571 	key.objectid += key.offset;
572 	key.type = (u8)-1;
573 	key.offset = (u64)-1;
574 
575 	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
576 }
577 
578 /*
579  * If remove is 1, then we are removing free space, thus clearing bits in the
580  * bitmap. If remove is 0, then we are adding free space, thus setting bits in
581  * the bitmap.
582  */
583 static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
584 				    struct btrfs_block_group_cache *block_group,
585 				    struct btrfs_path *path,
586 				    u64 start, u64 size, int remove)
587 {
588 	struct btrfs_root *root = block_group->fs_info->free_space_root;
589 	struct btrfs_key key;
590 	u64 end = start + size;
591 	u64 cur_start, cur_size;
592 	int prev_bit, next_bit;
593 	int new_extents;
594 	int ret;
595 
596 	/*
597 	 * Read the bit for the block immediately before the extent of space if
598 	 * that block is within the block group.
599 	 */
600 	if (start > block_group->key.objectid) {
601 		u64 prev_block = start - block_group->fs_info->sectorsize;
602 
603 		key.objectid = prev_block;
604 		key.type = (u8)-1;
605 		key.offset = (u64)-1;
606 
607 		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
608 		if (ret)
609 			goto out;
610 
611 		prev_bit = free_space_test_bit(block_group, path, prev_block);
612 
613 		/* The previous block may have been in the previous bitmap. */
614 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
615 		if (start >= key.objectid + key.offset) {
616 			ret = free_space_next_bitmap(trans, root, path);
617 			if (ret)
618 				goto out;
619 		}
620 	} else {
621 		key.objectid = start;
622 		key.type = (u8)-1;
623 		key.offset = (u64)-1;
624 
625 		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
626 		if (ret)
627 			goto out;
628 
629 		prev_bit = -1;
630 	}
631 
632 	/*
633 	 * Iterate over all of the bitmaps overlapped by the extent of space,
634 	 * clearing/setting bits as required.
635 	 */
636 	cur_start = start;
637 	cur_size = size;
638 	while (1) {
639 		free_space_set_bits(block_group, path, &cur_start, &cur_size,
640 				    !remove);
641 		if (cur_size == 0)
642 			break;
643 		ret = free_space_next_bitmap(trans, root, path);
644 		if (ret)
645 			goto out;
646 	}
647 
648 	/*
649 	 * Read the bit for the block immediately after the extent of space if
650 	 * that block is within the block group.
651 	 */
652 	if (end < block_group->key.objectid + block_group->key.offset) {
653 		/* The next block may be in the next bitmap. */
654 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
655 		if (end >= key.objectid + key.offset) {
656 			ret = free_space_next_bitmap(trans, root, path);
657 			if (ret)
658 				goto out;
659 		}
660 
661 		next_bit = free_space_test_bit(block_group, path, end);
662 	} else {
663 		next_bit = -1;
664 	}
665 
666 	if (remove) {
667 		new_extents = -1;
668 		if (prev_bit == 1) {
669 			/* Leftover on the left. */
670 			new_extents++;
671 		}
672 		if (next_bit == 1) {
673 			/* Leftover on the right. */
674 			new_extents++;
675 		}
676 	} else {
677 		new_extents = 1;
678 		if (prev_bit == 1) {
679 			/* Merging with neighbor on the left. */
680 			new_extents--;
681 		}
682 		if (next_bit == 1) {
683 			/* Merging with neighbor on the right. */
684 			new_extents--;
685 		}
686 	}
687 
688 	btrfs_release_path(path);
689 	ret = update_free_space_extent_count(trans, block_group, path,
690 					     new_extents);
691 
692 out:
693 	return ret;
694 }
695 
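/*
 * Remove free space from a block group using the extent format by shrinking
 * or splitting the FREE_SPACE_EXTENT item that contains it.
 */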
696 static int remove_free_space_extent(struct btrfs_trans_handle *trans,
697 				    struct btrfs_block_group_cache *block_group,
698 				    struct btrfs_path *path,
699 				    u64 start, u64 size)
700 {
701 	struct btrfs_root *root = trans->fs_info->free_space_root;
702 	struct btrfs_key key;
703 	u64 found_start, found_end;
704 	u64 end = start + size;
705 	int new_extents = -1;
706 	int ret;
707 
708 	key.objectid = start;
709 	key.type = (u8)-1;
710 	key.offset = (u64)-1;
711 
712 	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
713 	if (ret)
714 		goto out;
715 
716 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
717 
718 	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
719 
720 	found_start = key.objectid;
721 	found_end = key.objectid + key.offset;
722 	ASSERT(start >= found_start && end <= found_end);
723 
724 	/*
725 	 * Okay, now that we've found the free space extent which contains the
726 	 * free space that we are removing, there are four cases:
727 	 *
728 	 * 1. We're using the whole extent: delete the key we found and
729 	 * decrement the free space extent count.
730 	 * 2. We are using part of the extent starting at the beginning: delete
731 	 * the key we found and insert a new key representing the leftover at
732 	 * the end. There is no net change in the number of extents.
733 	 * 3. We are using part of the extent ending at the end: delete the key
734 	 * we found and insert a new key representing the leftover at the
735 	 * beginning. There is no net change in the number of extents.
736 	 * 4. We are using part of the extent in the middle: delete the key we
737 	 * found and insert two new keys representing the leftovers on each
738 	 * side. Where we used to have one extent, we now have two, so increment
739 	 * the extent count. We may need to convert the block group to bitmaps
740 	 * as a result.
741 	 */
742 
743 	/* Delete the existing key (cases 1-4). */
744 	ret = btrfs_del_item(trans, root, path);
745 	if (ret)
746 		goto out;
747 
748 	/* Add a key for leftovers at the beginning (cases 3 and 4). */
749 	if (start > found_start) {
750 		key.objectid = found_start;
751 		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
752 		key.offset = start - found_start;
753 
754 		btrfs_release_path(path);
755 		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
756 		if (ret)
757 			goto out;
758 		new_extents++;
759 	}
760 
761 	/* Add a key for leftovers at the end (cases 2 and 4). */
762 	if (end < found_end) {
763 		key.objectid = end;
764 		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
765 		key.offset = found_end - end;
766 
767 		btrfs_release_path(path);
768 		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
769 		if (ret)
770 			goto out;
771 		new_extents++;
772 	}
773 
774 	btrfs_release_path(path);
775 	ret = update_free_space_extent_count(trans, block_group, path,
776 					     new_extents);
777 
778 out:
779 	return ret;
780 }
781 
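/*
 * Remove [start, start + size) from the free space tree, dispatching to the
 * bitmap or extent handler according to the block group's current format.
 */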
782 EXPORT_FOR_TESTS
783 int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
784 				  struct btrfs_block_group_cache *block_group,
785 				  struct btrfs_path *path, u64 start, u64 size)
786 {
787 	struct btrfs_free_space_info *info;
788 	u32 flags;
789 	int ret;
790 
791 	if (block_group->needs_free_space) {
792 		ret = __add_block_group_free_space(trans, block_group, path);
793 		if (ret)
794 			return ret;
795 	}
796 
797 	info = search_free_space_info(NULL, block_group, path, 0);
798 	if (IS_ERR(info))
799 		return PTR_ERR(info);
800 	flags = btrfs_free_space_flags(path->nodes[0], info);
801 	btrfs_release_path(path);
802 
803 	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
804 		return modify_free_space_bitmap(trans, block_group, path,
805 						start, size, 1);
806 	} else {
807 		return remove_free_space_extent(trans, block_group, path,
808 						start, size);
809 	}
810 }
811 
812 int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
813 				u64 start, u64 size)
814 {
815 	struct btrfs_block_group_cache *block_group;
816 	struct btrfs_path *path;
817 	int ret;
818 
819 	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
820 		return 0;
821 
822 	path = btrfs_alloc_path();
823 	if (!path) {
824 		ret = -ENOMEM;
825 		goto out;
826 	}
827 
828 	block_group = btrfs_lookup_block_group(trans->fs_info, start);
829 	if (!block_group) {
830 		ASSERT(0);
831 		ret = -ENOENT;
832 		goto out;
833 	}
834 
835 	mutex_lock(&block_group->free_space_lock);
836 	ret = __remove_from_free_space_tree(trans, block_group, path, start,
837 					    size);
838 	mutex_unlock(&block_group->free_space_lock);
839 
840 	btrfs_put_block_group(block_group);
841 out:
842 	btrfs_free_path(path);
843 	if (ret)
844 		btrfs_abort_transaction(trans, ret);
845 	return ret;
846 }
847 
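/*
 * Add free space to a block group using the extent format, merging the new
 * range with any adjacent FREE_SPACE_EXTENT items on either side.
 */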
848 static int add_free_space_extent(struct btrfs_trans_handle *trans,
849 				 struct btrfs_block_group_cache *block_group,
850 				 struct btrfs_path *path,
851 				 u64 start, u64 size)
852 {
853 	struct btrfs_root *root = trans->fs_info->free_space_root;
854 	struct btrfs_key key, new_key;
855 	u64 found_start, found_end;
856 	u64 end = start + size;
857 	int new_extents = 1;
858 	int ret;
859 
860 	/*
861 	 * We are adding a new extent of free space, but we need to merge
862 	 * extents. There are four cases here:
863 	 *
864 	 * 1. The new extent does not have any immediate neighbors to merge
865 	 * with: add the new key and increment the free space extent count. We
866 	 * may need to convert the block group to bitmaps as a result.
867 	 * 2. The new extent has an immediate neighbor before it: remove the
868 	 * previous key and insert a new key combining both of them. There is no
869 	 * net change in the number of extents.
870 	 * 3. The new extent has an immediate neighbor after it: remove the next
871 	 * key and insert a new key combining both of them. There is no net
872 	 * change in the number of extents.
873 	 * 4. The new extent has immediate neighbors on both sides: remove both
874 	 * of the keys and insert a new key combining all of them. Where we used
875 	 * to have two extents, we now have one, so decrement the extent count.
876 	 */
877 
878 	new_key.objectid = start;
879 	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
880 	new_key.offset = size;
881 
882 	/* Search for a neighbor on the left. */
883 	if (start == block_group->key.objectid)
884 		goto right;
885 	key.objectid = start - 1;
886 	key.type = (u8)-1;
887 	key.offset = (u64)-1;
888 
889 	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
890 	if (ret)
891 		goto out;
892 
893 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
894 
895 	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
896 		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
897 		btrfs_release_path(path);
898 		goto right;
899 	}
900 
901 	found_start = key.objectid;
902 	found_end = key.objectid + key.offset;
903 	ASSERT(found_start >= block_group->key.objectid &&
904 	       found_end > block_group->key.objectid);
905 	ASSERT(found_start < start && found_end <= start);
906 
907 	/*
908 	 * Delete the neighbor on the left and absorb it into the new key (cases
909 	 * 2 and 4).
910 	 */
911 	if (found_end == start) {
912 		ret = btrfs_del_item(trans, root, path);
913 		if (ret)
914 			goto out;
915 		new_key.objectid = found_start;
916 		new_key.offset += key.offset;
917 		new_extents--;
918 	}
919 	btrfs_release_path(path);
920 
921 right:
922 	/* Search for a neighbor on the right. */
923 	if (end == block_group->key.objectid + block_group->key.offset)
924 		goto insert;
925 	key.objectid = end;
926 	key.type = (u8)-1;
927 	key.offset = (u64)-1;
928 
929 	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
930 	if (ret)
931 		goto out;
932 
933 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
934 
935 	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
936 		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
937 		btrfs_release_path(path);
938 		goto insert;
939 	}
940 
941 	found_start = key.objectid;
942 	found_end = key.objectid + key.offset;
943 	ASSERT(found_start >= block_group->key.objectid &&
944 	       found_end > block_group->key.objectid);
945 	ASSERT((found_start < start && found_end <= start) ||
946 	       (found_start >= end && found_end > end));
947 
948 	/*
949 	 * Delete the neighbor on the right and absorb it into the new key
950 	 * (cases 3 and 4).
951 	 */
952 	if (found_start == end) {
953 		ret = btrfs_del_item(trans, root, path);
954 		if (ret)
955 			goto out;
956 		new_key.offset += key.offset;
957 		new_extents--;
958 	}
959 	btrfs_release_path(path);
960 
961 insert:
962 	/* Insert the new key (cases 1-4). */
963 	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
964 	if (ret)
965 		goto out;
966 
967 	btrfs_release_path(path);
968 	ret = update_free_space_extent_count(trans, block_group, path,
969 					     new_extents);
970 
971 out:
972 	return ret;
973 }
974 
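/*
 * Counterpart of __remove_from_free_space_tree(): add [start, start + size)
 * to the free space tree in whichever format the block group is using.
 */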
975 EXPORT_FOR_TESTS
976 int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
977 			     struct btrfs_block_group_cache *block_group,
978 			     struct btrfs_path *path, u64 start, u64 size)
979 {
980 	struct btrfs_free_space_info *info;
981 	u32 flags;
982 	int ret;
983 
984 	if (block_group->needs_free_space) {
985 		ret = __add_block_group_free_space(trans, block_group, path);
986 		if (ret)
987 			return ret;
988 	}
989 
990 	info = search_free_space_info(NULL, block_group, path, 0);
991 	if (IS_ERR(info))
992 		return PTR_ERR(info);
993 	flags = btrfs_free_space_flags(path->nodes[0], info);
994 	btrfs_release_path(path);
995 
996 	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
997 		return modify_free_space_bitmap(trans, block_group, path,
998 						start, size, 0);
999 	} else {
1000 		return add_free_space_extent(trans, block_group, path, start,
1001 					     size);
1002 	}
1003 }
1004 
1005 int add_to_free_space_tree(struct btrfs_trans_handle *trans,
1006 			   u64 start, u64 size)
1007 {
1008 	struct btrfs_block_group_cache *block_group;
1009 	struct btrfs_path *path;
1010 	int ret;
1011 
1012 	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
1013 		return 0;
1014 
1015 	path = btrfs_alloc_path();
1016 	if (!path) {
1017 		ret = -ENOMEM;
1018 		goto out;
1019 	}
1020 
1021 	block_group = btrfs_lookup_block_group(trans->fs_info, start);
1022 	if (!block_group) {
1023 		ASSERT(0);
1024 		ret = -ENOENT;
1025 		goto out;
1026 	}
1027 
1028 	mutex_lock(&block_group->free_space_lock);
1029 	ret = __add_to_free_space_tree(trans, block_group, path, start, size);
1030 	mutex_unlock(&block_group->free_space_lock);
1031 
1032 	btrfs_put_block_group(block_group);
1033 out:
1034 	btrfs_free_path(path);
1035 	if (ret)
1036 		btrfs_abort_transaction(trans, ret);
1037 	return ret;
1038 }
1039 
1040 /*
1041  * Populate the free space tree by walking the extent tree. Operations on the
1042  * extent tree that happen as a result of writes to the free space tree will go
1043  * through the normal add/remove hooks.
1044  */
1045 static int populate_free_space_tree(struct btrfs_trans_handle *trans,
1046 				    struct btrfs_block_group_cache *block_group)
1047 {
1048 	struct btrfs_root *extent_root = trans->fs_info->extent_root;
1049 	struct btrfs_path *path, *path2;
1050 	struct btrfs_key key;
1051 	u64 start, end;
1052 	int ret;
1053 
1054 	path = btrfs_alloc_path();
1055 	if (!path)
1056 		return -ENOMEM;
1057 	path->reada = READA_FORWARD;
1058 
1059 	path2 = btrfs_alloc_path();
1060 	if (!path2) {
1061 		btrfs_free_path(path);
1062 		return -ENOMEM;
1063 	}
1064 
1065 	ret = add_new_free_space_info(trans, block_group, path2);
1066 	if (ret)
1067 		goto out;
1068 
1069 	mutex_lock(&block_group->free_space_lock);
1070 
1071 	/*
1072 	 * Iterate through all of the extent and metadata items in this block
1073 	 * group, adding the free space between them and the free space at the
1074 	 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
1075 	 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
1076 	 * contained in.
1077 	 */
1078 	key.objectid = block_group->key.objectid;
1079 	key.type = BTRFS_EXTENT_ITEM_KEY;
1080 	key.offset = 0;
1081 
1082 	ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
1083 	if (ret < 0)
1084 		goto out_locked;
1085 	ASSERT(ret == 0);
1086 
1087 	start = block_group->key.objectid;
1088 	end = block_group->key.objectid + block_group->key.offset;
1089 	while (1) {
1090 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1091 
1092 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
1093 		    key.type == BTRFS_METADATA_ITEM_KEY) {
1094 			if (key.objectid >= end)
1095 				break;
1096 
1097 			if (start < key.objectid) {
1098 				ret = __add_to_free_space_tree(trans,
1099 							       block_group,
1100 							       path2, start,
1101 							       key.objectid -
1102 							       start);
1103 				if (ret)
1104 					goto out_locked;
1105 			}
1106 			start = key.objectid;
1107 			if (key.type == BTRFS_METADATA_ITEM_KEY)
1108 				start += trans->fs_info->nodesize;
1109 			else
1110 				start += key.offset;
1111 		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1112 			if (key.objectid != block_group->key.objectid)
1113 				break;
1114 		}
1115 
1116 		ret = btrfs_next_item(extent_root, path);
1117 		if (ret < 0)
1118 			goto out_locked;
1119 		if (ret)
1120 			break;
1121 	}
1122 	if (start < end) {
1123 		ret = __add_to_free_space_tree(trans, block_group, path2,
1124 					       start, end - start);
1125 		if (ret)
1126 			goto out_locked;
1127 	}
1128 
1129 	ret = 0;
1130 out_locked:
1131 	mutex_unlock(&block_group->free_space_lock);
1132 out:
1133 	btrfs_free_path(path2);
1134 	btrfs_free_path(path);
1135 	return ret;
1136 }
1137 
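/*
 * Create the free space tree, populate it from the extent tree for every
 * existing block group, and set the compat_ro bits that mark it valid.
 */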
1138 int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
1139 {
1140 	struct btrfs_trans_handle *trans;
1141 	struct btrfs_root *tree_root = fs_info->tree_root;
1142 	struct btrfs_root *free_space_root;
1143 	struct btrfs_block_group_cache *block_group;
1144 	struct rb_node *node;
1145 	int ret;
1146 
1147 	trans = btrfs_start_transaction(tree_root, 0);
1148 	if (IS_ERR(trans))
1149 		return PTR_ERR(trans);
1150 
1151 	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1152 	free_space_root = btrfs_create_tree(trans,
1153 					    BTRFS_FREE_SPACE_TREE_OBJECTID);
1154 	if (IS_ERR(free_space_root)) {
1155 		ret = PTR_ERR(free_space_root);
1156 		goto abort;
1157 	}
1158 	fs_info->free_space_root = free_space_root;
1159 
1160 	node = rb_first(&fs_info->block_group_cache_tree);
1161 	while (node) {
1162 		block_group = rb_entry(node, struct btrfs_block_group_cache,
1163 				       cache_node);
1164 		ret = populate_free_space_tree(trans, block_group);
1165 		if (ret)
1166 			goto abort;
1167 		node = rb_next(node);
1168 	}
1169 
1170 	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1171 	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1172 	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1173 
1174 	return btrfs_commit_transaction(trans);
1175 
1176 abort:
1177 	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
1178 	btrfs_abort_transaction(trans, ret);
1179 	btrfs_end_transaction(trans);
1180 	return ret;
1181 }
1182 
1183 static int clear_free_space_tree(struct btrfs_trans_handle *trans,
1184 				 struct btrfs_root *root)
1185 {
1186 	struct btrfs_path *path;
1187 	struct btrfs_key key;
1188 	int nr;
1189 	int ret;
1190 
1191 	path = btrfs_alloc_path();
1192 	if (!path)
1193 		return -ENOMEM;
1194 
1195 	path->leave_spinning = 1;
1196 
1197 	key.objectid = 0;
1198 	key.type = 0;
1199 	key.offset = 0;
1200 
1201 	while (1) {
1202 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1203 		if (ret < 0)
1204 			goto out;
1205 
1206 		nr = btrfs_header_nritems(path->nodes[0]);
1207 		if (!nr)
1208 			break;
1209 
1210 		path->slots[0] = 0;
1211 		ret = btrfs_del_items(trans, root, path, 0, nr);
1212 		if (ret)
1213 			goto out;
1214 
1215 		btrfs_release_path(path);
1216 	}
1217 
1218 	ret = 0;
1219 out:
1220 	btrfs_free_path(path);
1221 	return ret;
1222 }
1223 
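/*
 * Tear the free space tree down: clear the compat_ro bits, delete all of the
 * tree's items and its root item, and free its root block.
 */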
1224 int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
1225 {
1226 	struct btrfs_trans_handle *trans;
1227 	struct btrfs_root *tree_root = fs_info->tree_root;
1228 	struct btrfs_root *free_space_root = fs_info->free_space_root;
1229 	int ret;
1230 
1231 	trans = btrfs_start_transaction(tree_root, 0);
1232 	if (IS_ERR(trans))
1233 		return PTR_ERR(trans);
1234 
1235 	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
1236 	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
1237 	fs_info->free_space_root = NULL;
1238 
1239 	ret = clear_free_space_tree(trans, free_space_root);
1240 	if (ret)
1241 		goto abort;
1242 
1243 	ret = btrfs_del_root(trans, &free_space_root->root_key);
1244 	if (ret)
1245 		goto abort;
1246 
1247 	list_del(&free_space_root->dirty_list);
1248 
1249 	btrfs_tree_lock(free_space_root->node);
1250 	btrfs_clean_tree_block(free_space_root->node);
1251 	btrfs_tree_unlock(free_space_root->node);
1252 	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
1253 			      0, 1);
1254 
1255 	free_extent_buffer(free_space_root->node);
1256 	free_extent_buffer(free_space_root->commit_root);
1257 	kfree(free_space_root);
1258 
1259 	return btrfs_commit_transaction(trans);
1260 
1261 abort:
1262 	btrfs_abort_transaction(trans, ret);
1263 	btrfs_end_transaction(trans);
1264 	return ret;
1265 }
1266 
1267 static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
1268 					struct btrfs_block_group_cache *block_group,
1269 					struct btrfs_path *path)
1270 {
1271 	int ret;
1272 
1273 	block_group->needs_free_space = 0;
1274 
1275 	ret = add_new_free_space_info(trans, block_group, path);
1276 	if (ret)
1277 		return ret;
1278 
1279 	return __add_to_free_space_tree(trans, block_group, path,
1280 					block_group->key.objectid,
1281 					block_group->key.offset);
1282 }
1283 
1284 int add_block_group_free_space(struct btrfs_trans_handle *trans,
1285 			       struct btrfs_block_group_cache *block_group)
1286 {
1287 	struct btrfs_fs_info *fs_info = trans->fs_info;
1288 	struct btrfs_path *path = NULL;
1289 	int ret = 0;
1290 
1291 	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
1292 		return 0;
1293 
1294 	mutex_lock(&block_group->free_space_lock);
1295 	if (!block_group->needs_free_space)
1296 		goto out;
1297 
1298 	path = btrfs_alloc_path();
1299 	if (!path) {
1300 		ret = -ENOMEM;
1301 		goto out;
1302 	}
1303 
1304 	ret = __add_block_group_free_space(trans, block_group, path);
1305 
1306 out:
1307 	btrfs_free_path(path);
1308 	mutex_unlock(&block_group->free_space_lock);
1309 	if (ret)
1310 		btrfs_abort_transaction(trans, ret);
1311 	return ret;
1312 }
1313 
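/*
 * Delete everything the free space tree holds for a block group that is being
 * removed: its FREE_SPACE_INFO item and any extent or bitmap items.
 */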
1314 int remove_block_group_free_space(struct btrfs_trans_handle *trans,
1315 				  struct btrfs_block_group_cache *block_group)
1316 {
1317 	struct btrfs_root *root = trans->fs_info->free_space_root;
1318 	struct btrfs_path *path;
1319 	struct btrfs_key key, found_key;
1320 	struct extent_buffer *leaf;
1321 	u64 start, end;
1322 	int done = 0, nr;
1323 	int ret;
1324 
1325 	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
1326 		return 0;
1327 
1328 	if (block_group->needs_free_space) {
1329 		/* We never added this block group to the free space tree. */
1330 		return 0;
1331 	}
1332 
1333 	path = btrfs_alloc_path();
1334 	if (!path) {
1335 		ret = -ENOMEM;
1336 		goto out;
1337 	}
1338 
1339 	start = block_group->key.objectid;
1340 	end = block_group->key.objectid + block_group->key.offset;
1341 
1342 	key.objectid = end - 1;
1343 	key.type = (u8)-1;
1344 	key.offset = (u64)-1;
1345 
1346 	while (!done) {
1347 		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
1348 		if (ret)
1349 			goto out;
1350 
1351 		leaf = path->nodes[0];
1352 		nr = 0;
1353 		path->slots[0]++;
1354 		while (path->slots[0] > 0) {
1355 			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);
1356 
1357 			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
1358 				ASSERT(found_key.objectid == block_group->key.objectid);
1359 				ASSERT(found_key.offset == block_group->key.offset);
1360 				done = 1;
1361 				nr++;
1362 				path->slots[0]--;
1363 				break;
1364 			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
1365 				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
1366 				ASSERT(found_key.objectid >= start);
1367 				ASSERT(found_key.objectid < end);
1368 				ASSERT(found_key.objectid + found_key.offset <= end);
1369 				nr++;
1370 				path->slots[0]--;
1371 			} else {
1372 				ASSERT(0);
1373 			}
1374 		}
1375 
1376 		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
1377 		if (ret)
1378 			goto out;
1379 		btrfs_release_path(path);
1380 	}
1381 
1382 	ret = 0;
1383 out:
1384 	btrfs_free_path(path);
1385 	if (ret)
1386 		btrfs_abort_transaction(trans, ret);
1387 	return ret;
1388 }
1389 
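/*
 * Walk a block group's bitmap items and feed each run of set bits to
 * add_new_free_space(), checking the result against the extent count recorded
 * in the FREE_SPACE_INFO item.
 */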
1390 static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
1391 				   struct btrfs_path *path,
1392 				   u32 expected_extent_count)
1393 {
1394 	struct btrfs_block_group_cache *block_group;
1395 	struct btrfs_fs_info *fs_info;
1396 	struct btrfs_root *root;
1397 	struct btrfs_key key;
1398 	int prev_bit = 0, bit;
1399 	/* Initialize to silence GCC. */
1400 	u64 extent_start = 0;
1401 	u64 end, offset;
1402 	u64 total_found = 0;
1403 	u32 extent_count = 0;
1404 	int ret;
1405 
1406 	block_group = caching_ctl->block_group;
1407 	fs_info = block_group->fs_info;
1408 	root = fs_info->free_space_root;
1409 
1410 	end = block_group->key.objectid + block_group->key.offset;
1411 
1412 	while (1) {
1413 		ret = btrfs_next_item(root, path);
1414 		if (ret < 0)
1415 			goto out;
1416 		if (ret)
1417 			break;
1418 
1419 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1420 
1421 		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
1422 			break;
1423 
1424 		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
1425 		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
1426 
1427 		caching_ctl->progress = key.objectid;
1428 
1429 		offset = key.objectid;
1430 		while (offset < key.objectid + key.offset) {
1431 			bit = free_space_test_bit(block_group, path, offset);
1432 			if (prev_bit == 0 && bit == 1) {
1433 				extent_start = offset;
1434 			} else if (prev_bit == 1 && bit == 0) {
1435 				total_found += add_new_free_space(block_group,
1436 								  extent_start,
1437 								  offset);
1438 				if (total_found > CACHING_CTL_WAKE_UP) {
1439 					total_found = 0;
1440 					wake_up(&caching_ctl->wait);
1441 				}
1442 				extent_count++;
1443 			}
1444 			prev_bit = bit;
1445 			offset += fs_info->sectorsize;
1446 		}
1447 	}
1448 	if (prev_bit == 1) {
1449 		total_found += add_new_free_space(block_group, extent_start,
1450 						  end);
1451 		extent_count++;
1452 	}
1453 
1454 	if (extent_count != expected_extent_count) {
1455 		btrfs_err(fs_info,
1456 			  "incorrect extent count for %llu; counted %u, expected %u",
1457 			  block_group->key.objectid, extent_count,
1458 			  expected_extent_count);
1459 		ASSERT(0);
1460 		ret = -EIO;
1461 		goto out;
1462 	}
1463 
1464 	caching_ctl->progress = (u64)-1;
1465 
1466 	ret = 0;
1467 out:
1468 	return ret;
1469 }
1470 
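/*
 * Walk a block group's FREE_SPACE_EXTENT items and feed each one to
 * add_new_free_space(), checking the result against the recorded extent count.
 */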
1471 static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
1472 				   struct btrfs_path *path,
1473 				   u32 expected_extent_count)
1474 {
1475 	struct btrfs_block_group_cache *block_group;
1476 	struct btrfs_fs_info *fs_info;
1477 	struct btrfs_root *root;
1478 	struct btrfs_key key;
1479 	u64 end;
1480 	u64 total_found = 0;
1481 	u32 extent_count = 0;
1482 	int ret;
1483 
1484 	block_group = caching_ctl->block_group;
1485 	fs_info = block_group->fs_info;
1486 	root = fs_info->free_space_root;
1487 
1488 	end = block_group->key.objectid + block_group->key.offset;
1489 
1490 	while (1) {
1491 		ret = btrfs_next_item(root, path);
1492 		if (ret < 0)
1493 			goto out;
1494 		if (ret)
1495 			break;
1496 
1497 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1498 
1499 		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
1500 			break;
1501 
1502 		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
1503 		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
1504 
1505 		caching_ctl->progress = key.objectid;
1506 
1507 		total_found += add_new_free_space(block_group, key.objectid,
1508 						  key.objectid + key.offset);
1509 		if (total_found > CACHING_CTL_WAKE_UP) {
1510 			total_found = 0;
1511 			wake_up(&caching_ctl->wait);
1512 		}
1513 		extent_count++;
1514 	}
1515 
1516 	if (extent_count != expected_extent_count) {
1517 		btrfs_err(fs_info,
1518 			  "incorrect extent count for %llu; counted %u, expected %u",
1519 			  block_group->key.objectid, extent_count,
1520 			  expected_extent_count);
1521 		ASSERT(0);
1522 		ret = -EIO;
1523 		goto out;
1524 	}
1525 
1526 	caching_ctl->progress = (u64)-1;
1527 
1528 	ret = 0;
1529 out:
1530 	return ret;
1531 }
1532 
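/*
 * Cache a block group's free space from the free space tree, reading the
 * commit root without tree locks and using the bitmap or extent loader
 * depending on the block group's format.
 */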
1533 int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
1534 {
1535 	struct btrfs_block_group_cache *block_group;
1536 	struct btrfs_free_space_info *info;
1537 	struct btrfs_path *path;
1538 	u32 extent_count, flags;
1539 	int ret;
1540 
1541 	block_group = caching_ctl->block_group;
1542 
1543 	path = btrfs_alloc_path();
1544 	if (!path)
1545 		return -ENOMEM;
1546 
1547 	/*
1548 	 * Just like caching_thread() doesn't want to deadlock on the extent
1549 	 * tree, we don't want to deadlock on the free space tree.
1550 	 */
1551 	path->skip_locking = 1;
1552 	path->search_commit_root = 1;
1553 	path->reada = READA_FORWARD;
1554 
1555 	info = search_free_space_info(NULL, block_group, path, 0);
1556 	if (IS_ERR(info)) {
1557 		ret = PTR_ERR(info);
1558 		goto out;
1559 	}
1560 	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
1561 	flags = btrfs_free_space_flags(path->nodes[0], info);
1562 
1563 	/*
1564 	 * We left path pointing to the free space info item, so now
1565 	 * load_free_space_foo can just iterate through the free space tree from
1566 	 * there.
1567 	 */
1568 	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
1569 		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
1570 	else
1571 		ret = load_free_space_extents(caching_ctl, path, extent_count);
1572 
1573 out:
1574 	btrfs_free_path(path);
1575 	return ret;
1576 }
1577