// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};
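
/*
 * The async path queued by btrfs_wq_submit_bio() runs in three stages on
 * the worker threads: run_one_async_start() computes the checksums via
 * ->submit_bio_start, run_one_async_done() hands the bio on to
 * btrfs_submit_bio_done() (or ends it early on error), and
 * run_one_async_free() releases the async_submit_bio itself.
 */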

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}
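
/*
 * With the "btrfs-%s-%02d" format above, the generated class names look
 * like "btrfs-extent-00" .. "btrfs-extent-08" for the extent tree, one
 * name per level; these are the names that show up in lockdep reports.
 */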

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
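
/*
 * Illustrative only: a caller that wants a btrfs-style crc32c over a flat
 * buffer seeds with ~0, feeds the data through btrfs_csum_data() (in one
 * or more chunks) and stores the inverted result in little-endian byte
 * order with btrfs_csum_final():
 *
 *	u32 crc = ~(u32)0;
 *	u8 csum[BTRFS_CSUM_SIZE];
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, csum);
 *
 * csum_tree_block() and btrfs_check_super_csum() below follow exactly this
 * pattern.
 */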

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char result[BTRFS_CSUM_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}

	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
				csum_type);
		ret = 1;
	}

	return ret;
}

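/*
 * Verify that @eb has the @level we expect and, for blocks that are already
 * on disk (generation <= last committed transaction), that the key in slot
 * 0 matches @first_key recorded in the parent pointer.
 */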
static int verify_level_key(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *eb, int level,
			    struct btrfs_key *first_key, u64 parent_transid)
{
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
		WARN_ON(1);
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
#endif
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks which have been read from disk,
	 * whose generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

#ifdef CONFIG_BTRFS_DEBUG
	if (ret) {
		WARN_ON(1);
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
#endif
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (verify_level_key(fs_info, eb, level,
						  first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
		    ret == -EUCLEAN)
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

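/*
 * Check that the fsid in @eb's header matches the fsid of this filesystem,
 * walking the chain of seed fs_devices as well.  Returns 0 on a match and 1
 * if no device set carries this fsid.
 */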
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

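/*
 * Read completion hook for btree pages: once all pages of the eb have
 * finished IO, validate the block (bytenr, fsid, level, checksum and the
 * tree-checker sanity checks) before marking the buffer uptodate.
 */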
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(fs_info, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

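/*
 * bi_end_io for bios wrapped by btrfs_bio_wq_end_io(): record the bio
 * status and route the final completion to the workqueue matching the
 * bio's direction and btrfs_wq_endio_type, where end_workqueue_fn()
 * finishes it in task context.
 */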
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct  async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum the bio here; the actual
	 * submission to the device happens later, from run_one_async_done().
	 */
	return btree_csum_one_bio(bio);
}

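/*
 * Decide whether a metadata write should be checksummed inline (returns 0)
 * or offloaded to the worker threads (returns 1).  Inline is preferred when
 * there are synchronous writers waiting, or when the CPU can do crc32c in
 * hardware (SSE4.2 on x86), since the offload then buys nothing.
 */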
static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
			        buf->start, buf->start + buf->len - 1);
}

/*
 * Read the tree block at logical address @bytenr and do various basic but
 * critical verifications.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

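/*
 * Initialize the in-memory state of a btrfs_root.  This only sets up lists,
 * locks and counters; nothing is read from disk and no tree node is
 * attached here.
 */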
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

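/*
 * Look up a root item by @key in the tree of tree roots and read the root's
 * node from disk, verifying it against the generation and level recorded in
 * the root item.
 */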
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

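/*
 * Set up the runtime-only parts of a subvolume root: the free-inode caches,
 * the subvolume writers counter, an anonymous device number and the highest
 * objectid currently in use.
 */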
int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible for calling btrfs_free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

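/*
 * Insert @root into the fs_roots radix tree, keyed by root objectid.  The
 * tree is preloaded with GFP_NOFS so the insertion itself can run under the
 * fs_roots_radix_lock spinlock.  Returns -EEXIST if the root is already
 * cached.
 */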
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

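/*
 * Get a root by its location key: well-known global roots are returned
 * directly from fs_info, anything else is first looked up in the radix
 * tree cache and, on a miss, read from disk, initialized, checked for a
 * pending orphan cleanup item and inserted into the cache (retrying the
 * lookup if we race and hit -EEXIST).
 */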
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}

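/*
 * backing_dev_info congestion callback: the filesystem is considered
 * congested as soon as any of its devices reports a congested bdi.
 */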
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

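/*
 * The cleaner kthread runs the background housekeeping: delayed iputs,
 * deletion of dead snapshot/subvolume roots, inode defrag and removal of
 * unused block groups.  It backs off whenever btrfs_need_cleaner_sleep()
 * asks it to, and commits any transaction it started before exiting.
 */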
cleaner_kthread(void * arg)1663 static int cleaner_kthread(void *arg)
1664 {
1665 	struct btrfs_root *root = arg;
1666 	struct btrfs_fs_info *fs_info = root->fs_info;
1667 	int again;
1668 	struct btrfs_trans_handle *trans;
1669 
1670 	do {
1671 		again = 0;
1672 
1673 		/* Make the cleaner go to sleep early. */
1674 		if (btrfs_need_cleaner_sleep(fs_info))
1675 			goto sleep;
1676 
1677 		/*
1678 		 * Do not do anything if we might cause open_ctree() to block
1679 		 * before we have finished mounting the filesystem.
1680 		 */
1681 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1682 			goto sleep;
1683 
1684 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1685 			goto sleep;
1686 
1687 		/*
1688 		 * Avoid the problem that we change the status of the fs
1689 		 * during the above check and trylock.
1690 		 */
1691 		if (btrfs_need_cleaner_sleep(fs_info)) {
1692 			mutex_unlock(&fs_info->cleaner_mutex);
1693 			goto sleep;
1694 		}
1695 
1696 		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
1697 		btrfs_run_delayed_iputs(fs_info);
1698 		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
1699 
1700 		again = btrfs_clean_one_deleted_snapshot(root);
1701 		mutex_unlock(&fs_info->cleaner_mutex);
1702 
1703 		/*
1704 		 * The defragger has dealt with the R/O remount and umount,
1705 		 * needn't do anything special here.
1706 		 */
1707 		btrfs_run_defrag_inodes(fs_info);
1708 
		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example. So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans);
		if (ret)
			btrfs_err(fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}

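/*
 * The transaction kthread: wakes up periodically and commits the running
 * transaction once it is old enough (fs_info->commit_interval seconds) or
 * blocked, then kicks the cleaner.  If there is nothing to commit yet it
 * goes back to sleep for a shorter interval.
 */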
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * This will find the highest generation in the array of root backups.
 * The index of the newest entry is returned, or -1 if we can't find
 * anything.
 *
 * We check that the array is valid by comparing the generation of the
 * newest root in the array with the generation in the super block.  If
 * they don't match we discard it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}


/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * Just overwrite the last backup if we're at the same generation;
	 * this happens only at umount.
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
				       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * FIXME: the total bytes and num_devices need to match, or we
	 * need a fsck.
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
	return 0;
}

/* helper to clean up workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	btrfs_destroy_workqueue(fs_info->endio_workers);
	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
	btrfs_destroy_workqueue(fs_info->rmw_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->submit_workers);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->readahead_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	btrfs_destroy_workqueue(fs_info->extent_workers);
	/*
	 * Now that all other work queues are destroyed, we can safely destroy
	 * the queues used for metadata I/O, since tasks from those other work
	 * queues can do metadata I/O operations.
	 */
	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
}

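/*
 * Drop the in-memory extent buffers for a root's node and commit_root,
 * if the root exists.
 */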
static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

/* helper to clean up tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->extent_root);
	free_root_extent_buffers(info->csum_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	if (chunk_root)
		free_root_extent_buffers(info->chunk_root);
	free_root_extent_buffers(info->free_space_root);
}

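/*
 * Free all fs roots: first everything on the dead roots list, then
 * everything left in the fs_roots_radix tree.  On an aborted filesystem
 * also tear down the log root tree and pinned extents, which would
 * otherwise leak.
 */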
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			btrfs_put_fs_root(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log_root_tree(NULL, fs_info);
		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	}
}

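/*
 * The btrfs_init_*() helpers below perform one-time initialization of the
 * corresponding fs_info state (locks, counters, wait queues) at mount
 * time, before any of it can be used.
 */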
static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	fs_info->scrub_workers_refcnt = 0;
}

static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
}

static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
	struct inode *inode = fs_info->btree_inode;

	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(inode, 1);
	/*
	 * We set i_size on the btree inode to the maximum possible offset;
	 * the real end of the address space is determined by all of the
	 * devices in the system.
	 */
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &btree_aops;

	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
	BTRFS_I(inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);

	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(inode)->root = fs_info->tree_root;
	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
	btrfs_insert_inode_hash(inode);
}

static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	rwlock_init(&fs_info->dev_replace.lock);
	atomic_set(&fs_info->dev_replace.read_locks, 0);
	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
	init_waitqueue_head(&fs_info->replace_wait);
	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_op_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->qgroup_ulist = NULL;
	fs_info->qgroup_rescan_running = false;
	mutex_init(&fs_info->qgroup_rescan_lock);
}

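/*
 * Allocate all of the filesystem's workqueues.  Most are sized by the
 * mount-time thread pool size; a few are deliberately single threaded.
 * Returns -ENOMEM if any allocation fails, in which case the caller tears
 * everything down via btrfs_stop_all_workers().
 */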
static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
		struct btrfs_fs_devices *fs_devices)
{
	u32 max_active = fs_info->thread_pool_size;
	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;

	fs_info->workers =
		btrfs_alloc_workqueue(fs_info, "worker",
				      flags | WQ_HIGHPRI, max_active, 16);

	fs_info->delalloc_workers =
		btrfs_alloc_workqueue(fs_info, "delalloc",
				      flags, max_active, 2);

	fs_info->flush_workers =
		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
				      flags, max_active, 0);

	fs_info->caching_workers =
		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);

	/*
	 * A higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices.
	 */
	fs_info->submit_workers =
		btrfs_alloc_workqueue(fs_info, "submit", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 64);

	fs_info->fixup_workers =
		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers =
		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
	fs_info->endio_meta_workers =
		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
				      max_active, 4);
	fs_info->endio_meta_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
				      max_active, 2);
	fs_info->endio_raid56_workers =
		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
				      max_active, 4);
	fs_info->endio_repair_workers =
		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
	fs_info->rmw_workers =
		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
	fs_info->endio_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
				      max_active, 2);
	fs_info->endio_freespace_worker =
		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
				      max_active, 0);
	fs_info->delayed_workers =
		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
				      max_active, 0);
	fs_info->readahead_workers =
		btrfs_alloc_workqueue(fs_info, "readahead", flags,
				      max_active, 2);
	fs_info->qgroup_rescan_workers =
		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
	fs_info->extent_workers =
		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 8);

	if (!(fs_info->workers && fs_info->delalloc_workers &&
	      fs_info->submit_workers && fs_info->flush_workers &&
	      fs_info->endio_workers && fs_info->endio_meta_workers &&
	      fs_info->endio_meta_write_workers &&
	      fs_info->endio_repair_workers &&
	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
	      fs_info->caching_workers && fs_info->readahead_workers &&
	      fs_info->fixup_workers && fs_info->delayed_workers &&
	      fs_info->extent_workers &&
	      fs_info->qgroup_rescan_workers)) {
		return -ENOMEM;
	}

	return 0;
}

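/*
 * Replay the tree log recorded in the super block: read the log tree root
 * and hand it to btrfs_recover_log_trees().  Requires at least one
 * writeable device, since replay modifies the filesystem.
 */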
static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
			    struct btrfs_fs_devices *fs_devices)
{
	int ret;
	struct btrfs_root *log_tree_root;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	u64 bytenr = btrfs_super_log_root(disk_super);
	int level = btrfs_super_log_root_level(disk_super);

	if (fs_devices->rw_devices == 0) {
		btrfs_warn(fs_info, "log replay required on RO media");
		return -EIO;
	}

	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!log_tree_root)
		return -ENOMEM;

	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	log_tree_root->node = read_tree_block(fs_info, bytenr,
					      fs_info->generation + 1,
					      level, NULL);
	if (IS_ERR(log_tree_root->node)) {
		btrfs_warn(fs_info, "failed to read log tree");
		ret = PTR_ERR(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
		btrfs_err(fs_info, "failed to read log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return -EIO;
	}
	/* returns with log_tree_root freed on success */
	ret = btrfs_recover_log_trees(log_tree_root);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to recover log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	}

	if (sb_rdonly(fs_info->sb)) {
		ret = btrfs_commit_super(fs_info);
		if (ret)
			return ret;
	}

	return 0;
}

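/*
 * Read the remaining tree roots from the tree root: the extent, dev and
 * csum trees are mandatory; the quota, UUID and free space trees are
 * optional and only attached when present.
 */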
static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key location;
	int ret;

	BUG_ON(!fs_info->tree_root);

	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->extent_root = root;

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->dev_root = root;
	btrfs_init_devices_late(fs_info);

	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->csum_root = root;

	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(root)) {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		fs_info->quota_root = root;
	}

	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		if (ret != -ENOENT)
			goto out;
	} else {
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->uuid_root = root;
	}

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto out;
		}
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->free_space_root = root;
	}

	return 0;
out:
	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
		   location.objectid, ret);
	return ret;
}

/*
 * Real super block validation
 * NOTE: super csum type and incompat features will not be checked here.
 *
 * @sb:		super block to check
 * @mirror_num:	the super block number to check its bytenr:
 * 		0	the primary (1st) sb
 * 		1, 2	2nd and 3rd backup copy
 * 	       -1	skip bytenr check
 */
static int validate_super(struct btrfs_fs_info *fs_info,
			    struct btrfs_super_block *sb, int mirror_num)
{
	u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;

	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
		btrfs_err(fs_info, "no valid FS found");
		ret = -EINVAL;
	}
	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
		ret = -EINVAL;
	}
	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "log_root level too big: %d >= %d",
				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * Check sectorsize and nodesize first; other checks will need them.
	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
	 */
	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
		ret = -EINVAL;
	}
	/* Only sectorsize == PAGE_SIZE is supported for now */
	if (sectorsize != PAGE_SIZE) {
		btrfs_err(fs_info,
			"sectorsize %llu not supported yet, only support %lu",
			sectorsize, PAGE_SIZE);
		ret = -EINVAL;
	}
	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
		ret = -EINVAL;
	}
	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
			  le32_to_cpu(sb->__unused_leafsize), nodesize);
		ret = -EINVAL;
	}

	/* Root alignment check */
	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
			   btrfs_super_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
			   btrfs_super_chunk_root(sb));
		ret = -EINVAL;
	}
	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
		btrfs_warn(fs_info, "log_root block unaligned: %llu",
			   btrfs_super_log_root(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
			   "dev_item UUID does not match fsid: %pU != %pU",
			   fs_info->fsid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * A hint to catch really bogus numbers (bitflips and the like);
	 * more exact checks are done later.
	 */
	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
		btrfs_err(fs_info, "bytes_used is too small %llu",
			  btrfs_super_bytes_used(sb));
		ret = -EINVAL;
	}
	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
		btrfs_err(fs_info, "invalid stripesize %u",
			  btrfs_super_stripesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		btrfs_warn(fs_info, "suspicious number of devices: %llu",
			   btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		btrfs_err(fs_info, "number of devices is 0");
		ret = -EINVAL;
	}

	if (mirror_num >= 0 &&
	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
		btrfs_err(fs_info, "super offset mismatch %llu != %u",
			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions, it must hold at least one key
	 * and one chunk
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		btrfs_err(fs_info, "system chunk array too big %u > %u",
			  btrfs_super_sys_array_size(sb),
			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		btrfs_err(fs_info, "system chunk array too small %u < %zu",
			  btrfs_super_sys_array_size(sb),
			  sizeof(struct btrfs_disk_key)
			  + sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter; we'll trust it more than the
	 * others, but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		btrfs_warn(fs_info,
			"suspicious: generation < chunk_root_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		btrfs_warn(fs_info,
			"suspicious: generation < cache_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_cache_generation(sb));

	return ret;
}

/*
 * Validation of super block at mount time.
 * Checks already done early at mount time, like csum type and incompat
 * flags, will be skipped.
 */
static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
{
	return validate_super(fs_info, fs_info->super_copy, 0);
}

/*
 * Validation of super block at write time.
 * Some checks like the bytenr check will be skipped as their values will be
 * overwritten soon.
 * Extra checks like csum type and incompat flags will be done here.
 */
static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
				      struct btrfs_super_block *sb)
{
	int ret;

	ret = validate_super(fs_info, sb, -1);
	if (ret < 0)
		goto out;
	if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) {
		ret = -EUCLEAN;
		btrfs_err(fs_info, "invalid csum type, has %u want %u",
			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
		goto out;
	}
	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
			  btrfs_super_incompat_flags(sb),
			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
		goto out;
	}
out:
	if (ret < 0)
		btrfs_err(fs_info,
		"super block corruption detected before writing it to disk");
	return ret;
}

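/*
 * Main entry point for mounting: allocates and initializes fs_info state,
 * reads and validates the super block, loads the chunk tree and then the
 * remaining tree roots (retrying from backup roots if allowed), starts the
 * cleaner and transaction kthreads, replays the log tree if needed, and
 * finishes with optional maintenance work (free space tree, orphan
 * cleanup, UUID tree checks).  On failure everything is unwound via the
 * fail_* labels in reverse order of setup.
 */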
int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	int ret;
	int err = -EINVAL;
	int num_backups_tried = 0;
	int backup_index = 0;
	int clear_free_space_tree = 0;
	int level;

	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!tree_root || !chunk_root) {
		err = -ENOMEM;
		goto fail;
	}

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
		err = ret;
		goto fail;
	}

	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_srcu;
	}
	fs_info->dirty_metadata_batch = PAGE_SIZE *
					(1 + ilog2(nr_cpu_ids));

	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_dirty_metadata_bytes;
	}

	ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
	if (ret) {
		err = ret;
		goto fail_delalloc_bytes;
	}

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_roots);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
	spin_lock_init(&fs_info->delalloc_root_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->qgroup_op_lock);
	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->unused_bgs_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	mutex_init(&fs_info->unused_bg_unpin_mutex);
	mutex_init(&fs_info->delete_unused_bgs_mutex);
	mutex_init(&fs_info->reloc_mutex);
	mutex_init(&fs_info->delalloc_root_mutex);
	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
	seqlock_init(&fs_info->profiles_lock);

	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_LIST_HEAD(&fs_info->unused_bgs);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic_set(&fs_info->qgroup_op_seq, 0);
	atomic_set(&fs_info->reada_works_cnt, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->sb = sb;
	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	atomic64_set(&fs_info->free_chunk_space, 0);
	fs_info->tree_mod_log = RB_ROOT;
	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
	/* readahead state */
	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	spin_lock_init(&fs_info->reada_lock);
	btrfs_init_ref_verify(fs_info);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_roots);
	spin_lock_init(&fs_info->ordered_root_lock);

	fs_info->btree_inode = new_inode(sb);
	if (!fs_info->btree_inode) {
		err = -ENOMEM;
		goto fail_bio_counter;
	}
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_KERNEL);
	if (!fs_info->delayed_root) {
		err = -ENOMEM;
		goto fail_iput;
	}
	btrfs_init_delayed_root(fs_info->delayed_root);

	btrfs_init_scrub(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	fs_info->check_integrity_print_mask = 0;
#endif
	btrfs_init_balance(fs_info);
	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);

	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);

	btrfs_init_btree_inode(fs_info);

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;
	fs_info->first_logical_byte = (u64)-1;

	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);

	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->ro_block_group_mutex);
	init_rwsem(&fs_info->commit_root_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	sema_init(&fs_info->uuid_tree_rescan_sem, 1);

	btrfs_init_dev_replace_locks(fs_info);
	btrfs_init_qgroup(fs_info);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	INIT_LIST_HEAD(&fs_info->pinned_chunks);

	/* Usable values until the real ones are cached from the superblock */
	fs_info->nodesize = 4096;
	fs_info->sectorsize = 4096;
	fs_info->stripesize = 4096;

	ret = btrfs_alloc_stripe_hash_table(fs_info);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);

	invalidate_bdev(fs_devices->latest_bdev);

	/*
	 * Read super block and check the signature bytes only
	 */
	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (IS_ERR(bh)) {
		err = PTR_ERR(bh);
		goto fail_alloc;
	}

	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
		btrfs_err(fs_info, "superblock checksum mismatch");
		err = -EINVAL;
		brelse(bh);
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_validate_mount_super(fs_info);
	if (ret) {
		btrfs_err(fs_info, "superblock contains fatal errors");
		err = -EINVAL;
		goto fail_alloc;
	}

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_alloc;

	/* check FS state, whether FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * Run through our array of backup supers and set up our ring
	 * pointer to the oldest one.
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		btrfs_err(fs_info,
		    "cannot mount because of unsupported optional features (%llx)",
		    features);
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		btrfs_info(fs_info, "has skinny extents");

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			btrfs_info(fs_info,
				"flagging fs with big metadata feature");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}

	nodesize = btrfs_super_nodesize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = sectorsize;
	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	/* Cache block sizes */
	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;
	fs_info->stripesize = stripesize;

	/*
	 * Mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range, which leads to corruption.
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != nodesize)) {
		btrfs_err(fs_info,
"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
			nodesize, sectorsize);
		goto fail_alloc;
	}

	/*
	 * No need to take the lock here: there is no other task that could
	 * be updating the flag at this point.
	 */
	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!sb_rdonly(sb) && features) {
		btrfs_err(fs_info,
	"cannot mount read-write because of unsupported optional features (%llx)",
		       features);
		err = -EINVAL;
		goto fail_alloc;
	}

	ret = btrfs_init_workqueues(fs_info, fs_devices);
	if (ret) {
		err = ret;
		goto fail_sb_buffer;
	}

	sb->s_bdi->congested_fn = btrfs_congested_fn;
	sb->s_bdi->congested_data = fs_info;
	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);
	memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(fs_info);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		btrfs_err(fs_info, "failed to read the system array: %d", ret);
		goto fail_sb_buffer;
	}

	generation = btrfs_super_chunk_root_generation(disk_super);
	level = btrfs_super_chunk_root_level(disk_super);

	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(fs_info,
					   btrfs_super_chunk_root(disk_super),
					   generation, level, NULL);
	if (IS_ERR(chunk_root->node) ||
	    !extent_buffer_uptodate(chunk_root->node)) {
		btrfs_err(fs_info, "failed to read chunk root");
		if (!IS_ERR(chunk_root->node))
			free_extent_buffer(chunk_root->node);
		chunk_root->node = NULL;
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
		goto fail_tree_roots;
	}

	/*
	 * Keep the devid that is marked to be the target device for the
	 * device replace procedure
	 */
	btrfs_free_extra_devids(fs_devices, 0);

	if (!fs_devices->latest_bdev) {
		btrfs_err(fs_info, "failed to read devices");
		goto fail_tree_roots;
	}

retry_root_backup:
	generation = btrfs_super_generation(disk_super);
	level = btrfs_super_root_level(disk_super);

	tree_root->node = read_tree_block(fs_info,
					  btrfs_super_root(disk_super),
					  generation, level, NULL);
	if (IS_ERR(tree_root->node) ||
	    !extent_buffer_uptodate(tree_root->node)) {
		btrfs_warn(fs_info, "failed to read tree root");
		if (!IS_ERR(tree_root->node))
			free_extent_buffer(tree_root->node);
		tree_root->node = NULL;
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);
	btrfs_set_root_refs(&tree_root->root_item, 1);

	mutex_lock(&tree_root->objectid_mutex);
	ret = btrfs_find_highest_objectid(tree_root,
					&tree_root->highest_objectid);
	if (ret) {
		mutex_unlock(&tree_root->objectid_mutex);
		goto recovery_tree_root;
	}

	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&tree_root->objectid_mutex);

	ret = btrfs_read_roots(fs_info);
	if (ret)
		goto recovery_tree_root;

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_verify_dev_extents(fs_info);
	if (ret) {
		btrfs_err(fs_info,
			  "failed to verify dev extents against chunks: %d",
			  ret);
		goto fail_block_groups;
	}
	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to recover balance: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
		goto fail_block_groups;
	}

	btrfs_free_extra_devids(fs_devices, 1);

	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
				ret);
		goto fail_block_groups;
	}

	ret = btrfs_sysfs_add_device(fs_devices);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
				ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_sysfs_add_mounted(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
		goto fail_sysfs;
	}

	ret = btrfs_read_block_groups(fs_info);
	if (ret) {
		btrfs_err(fs_info, "failed to read block groups: %d", ret);
		goto fail_sysfs;
	}

	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
		btrfs_warn(fs_info,
		"writeable mount is not allowed due to too many missing devices");
		goto fail_sysfs;
	}

	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_sysfs;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	if (!btrfs_test_opt(fs_info, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
	}

	/*
	 * Mount does not set all options immediately, we can do it now and do
	 * not have to wait for transaction commit
	 */
	btrfs_apply_pending_changes(fs_info);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(fs_info, fs_devices,
				    btrfs_test_opt(fs_info,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			btrfs_warn(fs_info,
				"failed to initialize integrity check module: %d",
				ret);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	if (btrfs_build_ref_tree(fs_info))
		btrfs_err(fs_info, "couldn't build ref tree");

	/* Do not make disk changes in a broken FS or when nologreplay is given */
	if (btrfs_super_log_root(disk_super) != 0 &&
	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
		ret = btrfs_replay_log(fs_info, fs_devices);
		if (ret) {
			err = ret;
			goto fail_qgroup;
		}
	}

	ret = btrfs_find_orphan_roots(fs_info);
	if (ret)
		goto fail_qgroup;

	if (!sb_rdonly(sb)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_qgroup;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = btrfs_recover_relocation(tree_root);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			btrfs_warn(fs_info, "failed to recover relocation: %d",
					ret);
			err = -EINVAL;
			goto fail_qgroup;
		}
	}

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
		goto fail_qgroup;
	}

	if (sb_rdonly(sb))
		return 0;

	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		clear_free_space_tree = 1;
	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
		btrfs_warn(fs_info, "free space tree is invalid");
		clear_free_space_tree = 1;
	}

	if (clear_free_space_tree) {
		btrfs_info(fs_info, "clearing free space tree");
		ret = btrfs_clear_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				   "failed to clear free space tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	}

	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		btrfs_info(fs_info, "creating free space tree");
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to create free space tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	}

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(fs_info);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
		close_ctree(fs_info);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
		close_ctree(fs_info);
		return ret;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (!fs_info->uuid_root) {
		btrfs_info(fs_info, "creating UUID tree");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to create the UUID tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
		   fs_info->generation !=
				btrfs_super_uuid_tree_generation(disk_super)) {
		btrfs_info(fs_info, "checking UUID tree");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			btrfs_warn(fs_info,
				"failed to check the UUID tree: %d", ret);
			close_ctree(fs_info);
			return ret;
		}
	} else {
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	}
	set_bit(BTRFS_FS_OPEN, &fs_info->flags);

	/*
	 * backuproot only affects mount behavior; if open_ctree succeeded,
	 * there is no need to keep the flag.
	 */
	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);

	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info);
	btrfs_free_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
	btrfs_free_block_groups(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_bio_counter:
	percpu_counter_destroy(&fs_info->bio_counter);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;

recovery_tree_root:
	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);

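/*
 * Completion callback for the superblock buffer head writes: on failure,
 * log the lost write and bump the device's write error statistics.
 */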
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		btrfs_warn_rl_in_rcu(device->fs_info,
				"lost page write due to IO error on %s",
					  rcu_str_deref(device->name));
		/*
		 * note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

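/*
 * Read a single superblock copy from @bdev and do basic sanity checks on
 * its bytenr and magic. On success the buffer head is returned via
 * @bh_ret and must be released by the caller.
 */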
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
			struct buffer_head **bh_ret)
{
	struct buffer_head *bh;
	struct btrfs_super_block *super;
	u64 bytenr;

	bytenr = btrfs_sb_offset(copy_num);
	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
		return -EINVAL;

	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
	/*
	 * If we fail to read from the underlying devices, as of now
	 * the best option we have is to mark it EIO.
	 */
	if (!bh)
		return -EIO;

	super = (struct btrfs_super_block *)bh->b_data;
	if (btrfs_super_bytenr(super) != bytenr ||
		    btrfs_super_magic(super) != BTRFS_MAGIC) {
		brelse(bh);
		return -EINVAL;
	}

	*bh_ret = bh;
	return 0;
}

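/*
 * Read the superblock copies that are scanned (currently only the primary
 * one, see the comment in the loop below) and return the buffer head of
 * the copy with the highest generation.
 */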
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	int ret = -EINVAL;

	/*
	 * We would like to check all the supers, but that would make a
	 * btrfs mount succeed after a mkfs from a different FS. So, until
	 * we have a special mount option to scan for the later supers, we
	 * check only the first copy here instead of looping up to
	 * BTRFS_SUPER_MIRROR_MAX.
	 */
	for (i = 0; i < 1; i++) {
		ret = btrfs_read_dev_one_super(bdev, i, &bh);
		if (ret)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}

	if (!latest)
		return ERR_PTR(ret);

	return latest;
}

/*
 * Write superblock @sb to the @device. Do not wait for completion, all the
 * buffer heads we write are pinned.
 *
 * Write @max_mirrors copies of the superblock, where 0 means the default:
 * write all copies that fit within the expected device size at commit time.
 * Note that max_mirrors must be the same for the write and wait phases.
 *
 * Return 0 if at least one copy was written successfully, -1 when the
 * buffer head could not be obtained or the submission failed.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;
	int op_flags;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		btrfs_set_super_bytenr(sb, bytenr);

		crc = ~(u32)0;
		crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		/* One reference for us, and we leave it for the caller */
		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
			      BTRFS_SUPER_INFO_SIZE);
		if (!bh) {
			btrfs_err(device->fs_info,
			    "couldn't get super buffer head for bytenr %llu",
			    bytenr);
			errors++;
			continue;
		}

		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

		/* one reference for submit_bh */
		get_bh(bh);

		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;
		bh->b_private = device;

		/*
		 * We FUA the first super. The others we allow to go down
		 * lazily.
		 */
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
			op_flags |= REQ_FUA;
		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}

/*
 * Wait for write completion of superblocks done by write_dev_supers.
 * @max_mirrors must be the same as in the write phase.
 *
 * Return 0 on success, -1 when a buffer head was not found, was not marked
 * up to date, or the primary superblock write failed.
 */
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int errors = 0;
	bool primary_failed = false;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		bh = __find_get_block(device->bdev,
				      bytenr / BTRFS_BDEV_BLOCKSIZE,
				      BTRFS_SUPER_INFO_SIZE);
		if (!bh) {
			errors++;
			if (i == 0)
				primary_failed = true;
			continue;
		}
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			errors++;
			if (i == 0)
				primary_failed = true;
		}

		/* drop our reference */
		brelse(bh);

		/* drop the reference from the writing run */
		brelse(bh);
	}

	/* log error, force error return */
	if (primary_failed) {
		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
			  device->devid);
		return -1;
	}

	return errors < i ? 0 : -1;
}

/*
 * Endio for write_dev_flush: this wakes anyone waiting for the barrier
 * when it is done.
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a flush request to the device if it supports it. Error handling is
 * done in the waiting counterpart.
 */
static void write_dev_flush(struct btrfs_device *device)
{
	struct request_queue *q = bdev_get_queue(device->bdev);
	struct bio *bio = device->flush_bio;

	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return;

	bio_reset(bio);
	bio->bi_end_io = btrfs_end_empty_barrier;
	bio_set_dev(bio, device->bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;

	btrfsic_submit_bio(bio);
	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}

/*
 * If the flush bio has been submitted by write_dev_flush, wait for it.
 */
static blk_status_t wait_dev_flush(struct btrfs_device *device)
{
	struct bio *bio = device->flush_bio;

	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
		return BLK_STS_OK;

	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
	wait_for_completion_io(&device->flush_wait);

	return bio->bi_status;
}

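/*
 * Check whether the filesystem could still be mounted read-write in a
 * degraded state after barrier failures; return -EIO if it could not.
 */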
static int check_barrier_error(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_check_rw_degradable(fs_info, NULL))
		return -EIO;
	return 0;
}

/*
 * Send an empty flush down to each device in parallel, then wait for them.
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_wait = 0;
	blk_status_t ret;

	lockdep_assert_held(&info->fs_devices->device_list_mutex);
	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		write_dev_flush(dev);
		dev->last_flush_error = BLK_STS_OK;
	}

	/* wait for all the barriers */
	list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_flush(dev);
		if (ret) {
			dev->last_flush_error = ret;
			btrfs_dev_stat_inc_and_print(dev,
					BTRFS_DEV_STAT_FLUSH_ERRS);
			errors_wait++;
		}
	}

	if (errors_wait) {
		/*
		 * At some point we need the status of all disks to arrive
		 * at the volume status, so error checking is pushed to a
		 * separate loop.
		 */
		return check_barrier_error(info);
	}
	return 0;
}

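/*
 * Return the minimum number of disk failures that can be tolerated across
 * all the block group profiles present in @flags.
 */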
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[BTRFS_RAID_SINGLE].
				    tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
			continue;
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[raid_type].
				    tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}

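/*
 * Write the superblock to all writeable devices, preceded by flush
 * barriers unless the nobarrier option is set. Returns -EIO when the
 * number of devices that failed exceeds the tolerated maximum (the device
 * count minus one).
 */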
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);

	/*
	 * max_mirrors == 0 indicates we're called from commit_transaction,
	 * not from fsync, where the tree roots in fs_info have not yet been
	 * made consistent on disk.
	 */
	if (max_mirrors == 0)
		backup_super_roots(fs_info);

	sb = fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	head = &fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(fs_info);
		if (ret) {
			mutex_unlock(
				&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, ret,
					      "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = btrfs_validate_write_super(fs_info, sb);
		if (ret < 0) {
			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(fs_info, -EUCLEAN,
				"unexpected superblock corruption detected");
			return -EUCLEAN;
		}

		ret = write_dev_supers(dev, sb, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
			continue;

		ret = wait_dev_supers(dev, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_handle_fs_error(fs_info, -EIO,
				      "%d errors while writing supers",
				      total_errors);
		return -EIO;
	}
	return 0;
}

/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				  struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log(NULL, root);
		if (root->reloc_root) {
			free_extent_buffer(root->reloc_root->node);
			free_extent_buffer(root->reloc_root->commit_root);
			btrfs_put_fs_root(root->reloc_root);
			root->reloc_root = NULL;
		}
	}

	if (root->free_ino_pinned)
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
	btrfs_free_fs_root(root);
}

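/*
 * Release the resources held by a fs root: the inode cache inode, the
 * anonymous bdev, the subvolume writers and the tree buffers, then drop a
 * reference on the root itself.
 */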
void btrfs_free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	if (root->subv_writers)
		btrfs_free_subvolume_writers(root->subv_writers);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	btrfs_put_fs_root(root);
}

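/*
 * Walk all fs roots in the radix tree and run orphan cleanup on each one,
 * stopping at the first error.
 */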
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;
	int index;

	while (1) {
		index = srcu_read_lock(&fs_info->subvol_srcu);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* Grab all the search results for later use */
			gang[i] = btrfs_grab_fs_root(gang[i]);
		}
		srcu_read_unlock(&fs_info->subvol_srcu, index);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_fs_root(gang[i]);
		}
		root_objectid++;
	}

	/* Release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_fs_root(gang[i]);
	}
	return err;
}

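/*
 * Run the delayed iputs, wait for any in-progress cleanup work to finish
 * and then commit the current transaction.
 */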
int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);
	wake_up_process(fs_info->cleaner_kthread);

	/* Wait until any ongoing cleanup work is done */
	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}

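/*
 * Tear down a mounted filesystem: stop the background threads and worker
 * queues, commit or clean up the last transaction, drop the tree roots
 * and release the remaining per-fs_info resources.
 */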
void close_ctree(struct btrfs_fs_info *fs_info)
{
	int ret;

	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * If the cleaner thread is stopped and there are
		 * block groups queued for removal, the deletion will be
		 * skipped when we quit the cleaner thread.
		 */
		btrfs_delete_unused_bgs(fs_info);

		ret = btrfs_commit_super(fs_info);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
		btrfs_error_commit_super(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

	btrfs_free_qgroup_config(fs_info);
	ASSERT(list_empty(&fs_info->delalloc_roots));

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
		       percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * We must make sure there are no read requests submitted after we
	 * stop all the workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	btrfs_free_block_groups(fs_info);

	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
		btrfsic_unmount(fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->bio_counter);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);

	while (!list_empty(&fs_info->pinned_chunks)) {
		struct extent_map *em;

		em = list_first_entry(&fs_info->pinned_chunks,
				      struct extent_map, list);
		list_del_init(&em->list);
		free_extent_map(em);
	}
}

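/*
 * Check that an extent buffer is up to date and that its generation
 * matches @parent_transid. May return -EAGAIN when @atomic is set and the
 * check would have to block.
 */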
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

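/*
 * Mark a tree buffer dirty and account it in the dirty metadata counters.
 * Warns when the buffer's generation does not match the currently running
 * transaction.
 */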
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled.  Normal people shouldn't be using unmapped buffers as
	 * dirty outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	fs_info = root->fs_info;
	btrfs_assert_tree_locked(buf);
	if (transid != fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
			buf->start, transid, fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	/*
	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
	 * but the item data not yet updated, so only check item pointers
	 * here, not item data.
	 */
	if (btrfs_header_level(buf) == 0 &&
	    btrfs_check_leaf_relaxed(fs_info, buf)) {
		btrfs_print_leaf(buf);
		ASSERT(0);
	}
#endif
}

static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * It looks as though older kernels can get into trouble with this
	 * code; they end up stuck in balance_dirty_pages forever.
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH,
				     fs_info->dirty_metadata_batch);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
	}
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}

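/*
 * Read an extent buffer from disk, verifying @parent_transid, @level and
 * @first_key against what is actually found.
 */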
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
		      struct btrfs_key *first_key)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					      level, first_key);
}

static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(fs_info);

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short-circuit the ordered completion code, which
	 * will make sure the ordered extents get properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

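/*
 * Mark the ordered extents of every ordered root with an IO error so that
 * their completion paths clean them up. Used when tearing down aborted
 * transactions.
 */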
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

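/*
 * Throw away all delayed refs of @trans. For heads that had reserved
 * space that must still be freed, pin down the corresponding extent range
 * again.
 */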
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		while ((n = rb_first(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			ref->in_tree = 0;
			rb_erase(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing == 0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		rb_erase(&head->href_node, &delayed_refs->href_root);
		RB_CLEAR_NODE(&head->href_node);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(fs_info, head->bytenr,
					 head->num_bytes, 1);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}

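/*
 * Remove every inode from the root's delalloc list and invalidate its
 * pages so that the pending dirty data is thrown away.
 */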
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		__btrfs_del_delalloc_inode(root, btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			invalidate_inode_pages2(inode->i_mapping);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					 delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}

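/*
 * Clear @mark from @dirty_pages and drop the dirty bit from any extent
 * buffers found in the cleared ranges, freeing the now stale buffers.
 */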
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}

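/*
 * Unpin everything from both freed_extents trees and return the ranges to
 * the free space accounting during error cleanup.
 */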
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		clear_extent_dirty(unpin, start, end);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &fs_info->freed_extents[0])
			unpin = &fs_info->freed_extents[1];
		else
			unpin = &fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}

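/*
 * Throw away any in-flight free space cache IO for @cache and drop the
 * block group reference taken for that IO.
 */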
static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	btrfs_put_block_group(cache);
}

void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of the io_bgs member for details on why
	 * it's safe to use it without any locking.
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group_cache,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}

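/*
 * Clean up a single aborted transaction: dirty block groups, delayed
 * refs, delayed inodes and the dirty and pinned extent trees, then mark
 * the transaction completed and wake up any waiters.
 */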
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info,
				    fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

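/*
 * Clean up every transaction still on fs_info->trans_list: wait for those
 * that have already started committing and tear down the rest.
 */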
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info->tree_root);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

static const struct extent_io_ops btree_extent_io_ops = {
	/* mandatory callbacks */
	.submit_bio_hook = btree_submit_bio_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,

	/* optional callbacks */
};
