// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"

#define BTRFS_ROOT_TRANS_TAG 0

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Will wait for previous running transaction to completely finish if there
 * | is one
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |						    Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update	    |
 * | super blocks.				    |
 * |						    |
 * | At this stage, new transaction is allowed to   |
 * | start.					    |
 * | All new start_transaction() calls will be	    |
 * | attached to transid N+1.			    |
 * |						    |
 * | To next stage:				    |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices			    |
 * V						    |
 * Transaction N [[TRANS_STATE_COMPLETED]]	    V
 *   All tree blocks and super blocks are written.  Transaction N+1
 *   This transaction is finished and all its	    [[TRANS_STATE_COMMIT_START]]
 *   data structures will be cleaned up.	    | Life goes on
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

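/*
 * Drop a reference on a transaction and free it once the last reference is
 * gone.  At that point all per-transaction lists and trees must be empty,
 * except for block groups left on ->deleted_bgs by an aborted commit, which
 * are unfrozen and released here.
 */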
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

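/*
 * Make the committed state visible: swap the commit root of every root on
 * the transaction's switch_commits list with its current root, free the
 * roots dropped during this transaction and update last_byte_to_unpin of
 * block groups that are still being cached, all under commit_root_sem so
 * readers see a consistent view.
 */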
static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;
	struct btrfs_caching_control *caching_ctl, *next;

	down_write(&fs_info->commit_root_sem);
	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	/*
	 * We have to update the last_byte_to_unpin under the commit_root_sem,
	 * at the same time we swap out the commit roots.
	 *
	 * This is because we must have a real view of the last spot the caching
	 * kthreads were while caching.  Consider the following views of the
	 * extent tree for a block group:
	 *
	 * commit root
	 * +----+----+----+----+----+----+----+
	 * |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
	 * +----+----+----+----+----+----+----+
	 * 0    1    2    3    4    5    6    7
	 *
	 * new commit root
	 * +----+----+----+----+----+----+----+
	 * |    |    |    |\\\\|    |    |\\\\|
	 * +----+----+----+----+----+----+----+
	 * 0    1    2    3    4    5    6    7
	 *
	 * If the cache_ctl->progress was at 3, then we are only allowed to
	 * unpin [0,1) and [2,3], because the caching thread has already
	 * processed those extents.  We are not allowed to unpin [5,6), because
	 * the caching thread will restart its search from 3, and thus find
	 * the hole from [4,6) to add to the free space cache.
	 */
	spin_lock(&fs_info->block_group_cache_lock);
	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		struct btrfs_block_group *cache = caching_ctl->block_group;

		if (btrfs_block_group_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			btrfs_put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}
	spin_unlock(&fs_info->block_group_cache_lock);
	up_write(&fs_info->commit_root_sem);
}

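/*
 * Helpers for the "external writer" count: trans handles whose type is in
 * TRANS_EXTWRITERS, i.e. writers that attached from outside the transaction
 * commit path.  The commit code uses this counter to know when no more such
 * writers can be adding new work to the transaction.
 */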
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after doing the chunk btree updates right after allocating a new
 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
 * chunk after all chunk btree updates and after finishing the second phase of
 * chunk allocation (btrfs_create_pending_block_groups()) in case some block
 * group had its chunk item insertion delayed to the second phase.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}

/*
 * Either allocate a new transaction or hop into the existing one.
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL.
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * Someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above.
		 */
		kfree(cur_trans);
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * Although the tree mod log is per file system and not per
	 * transaction, the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	INIT_LIST_HEAD(&cur_trans->releasing_ebs);
	spin_lock_init(&cur_trans->releasing_ebs_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
			IO_TREE_FS_PINNED_EXTENTS, NULL);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction.  This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(root == fs_info->extent_root);
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * See below for IN_TRANS_SETUP usage rules.  We have the
		 * reloc mutex held now, so there is only one writer in this
		 * function.
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/*
		 * Make sure readers find IN_TRANS_SETUP before they find our
		 * root->last_trans update.
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/*
		 * This is pretty tricky.  We don't want to take the
		 * relocation lock in btrfs_record_root_in_trans unless we're
		 * really doing the first setup for this root in this
		 * transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below.
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * See record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers.
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}

/*
 * Wait for a commit of the current transaction to become unblocked.  When
 * this is done, it is safe to start a new transaction, but the current one
 * might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

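/*
 * Common helper behind all the btrfs_*_transaction() variants.  Reserves
 * metadata space for @num_items tree operations (plus, if needed, delayed
 * refs and relocation root space), then joins or creates a transaction of
 * the given @type and returns a handle for it.  If the current task is
 * already inside a transaction, that handle is reused and its use count is
 * bumped.
 */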
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
				enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * We want to reserve all the bytes we may need all at once, so
		 * we only do 1 enospc flushing cycle per transaction start.  We
		 * accomplish this by simply assuming we'll do 2 x num_items
		 * worth of delayed refs updates in this trans handle, and
		 * refill that amount for whatever is missing in the reserve.
		 */
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		    delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/*
		 * Do the reservation for the relocation root creation.
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}

		if (rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv so just add one item's worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->root = root;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later.  We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		/*
		 * The transaction handle is fully initialized and linked with
		 * other structures so it needs to be ended in case of errors,
		 * not just freed.
		 */
		btrfs_end_transaction(h);
		return ERR_PTR(ret);
	}

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes, NULL);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or after waiting for the current one to finish.
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that an inactive transaction
 * is still in memory, not fully on disk.  If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT))
		btrfs_wait_for_commit(root->fs_info, 0);

	return trans;
}

/* Wait for a transaction commit to reach at least the given state. */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	wait_event(commit->commit_wait, commit->state >= min_state);
}

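/*
 * Wait for the transaction with the given transid to complete.  With a
 * @transid of 0, wait for the newest committing or committed transaction
 * instead.  Returns -EINVAL if the requested transaction does not exist
 * and was never committed.
 */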
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction
		 */
		if (!cur_trans) {
			if (transid > fs_info->last_trans_committed)
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}

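/*
 * Whether the current trans handle should be ended soon: true when space
 * for delayed refs is running low or the global block reserve is under
 * pressure.
 */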
static bool should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (btrfs_check_space_for_delayed_refs(fs_info))
		return true;

	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
		return true;

	return should_end_transaction(trans);
}

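/*
 * Return the metadata space that this trans handle reserved at
 * start_transaction() time but did not use back to the transaction block
 * reserve.
 */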
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;
}

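/*
 * Common implementation of btrfs_end_transaction() and its throttled
 * variant: release unused reservations, create pending block groups, drop
 * our writer counts and, if @throttle is set, run delayed iputs.  Returns
 * the abort error if the transaction was aborted, or -EROFS if the
 * filesystem went into an error state.
 */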
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) ||
	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			err = trans->aborted;
		else
			err = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leafs for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree.
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}

/*
 * This is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * Update all the cowonly tree roots on disk.
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		if (root != fs_info->extent_root)
			list_add_tail(&root->dirty_list,
				      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Now flush any delayed refs generated by updating all of the roots */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	list_add_tail(&fs_info->extent_root->dirty_list,
		      &trans->transaction->switch_commits);

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * Dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Write all dirty fs-tree roots into the tree of tree roots, processing
 * every root tagged with BTRFS_ROOT_TRANS_TAG in the fs roots radix tree.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			ret2 = btrfs_update_reloc_root(trans, root);
			if (ret2)
				return ret2;

			/* see comments in should_cow_block() */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (ret2)
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}

/*
 * Defrag a given btree.  Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(info);
		cond_resched();

		if (btrfs_fs_closing(info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(info)) {
			btrfs_debug(info, "defrag_root cancelled");
			ret = -EAGAIN;
			break;
		}
	}
	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
	return ret;
}

/*
 * Do all the special snapshot-related qgroup dirty hacks.
 *
 * This does all the needed qgroup inheritance and the dirty hack of
 * switching the commit roots and writing all btrees to disk inside one
 * transaction, so that qgroup accounting works.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not
	 * enabled. If this check races with the ioctl, rescan will
	 * kick in anyway.
	 */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Ensure dirty @src will be committed.  Otherwise, after the coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated
	 * root item.
	 */
	ret = record_root_in_trans(trans, src, 1);
	if (ret)
		return ret;

	/*
	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
	 * src root, so we must run the delayed refs here.
	 *
	 * However this isn't particularly fool proof, because there's no
	 * synchronization keeping us from changing the tree after this point
	 * before we do the qgroup_inherit, or even from making changes while
	 * we're doing the qgroup_inherit.  But that's a problem for the future,
	 * for now flush the delayed refs to narrow the race window where the
	 * qgroup counters could end up wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	/*
	 * We are going to commit the transaction, see the comment in
	 * btrfs_commit_transaction() for why we lock tree_log_mutex.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroups are all updated, we can inherit them to the new qgroups */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent trees
	 *    To ensure all subvolume and extent trees have a valid
	 *    commit_root for the later insert_dir_item() accounting
	 * 2) write all btree blocks onto disk
	 *    This is to make sure later btree modifications will be COWed;
	 *    otherwise commit_root could be populated and cause wrong qgroup
	 *    numbers
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	mutex_unlock(&fs_info->tree_log_mutex);

	/*
	 * Force the parent root to be updated, as we recorded it before so
	 * its last_trans == cur_transid.  Otherwise it won't be committed
	 * again onto disk after the later insert_dir_item().
	 */
	if (!ret)
		ret = record_root_in_trans(trans, parent, 1);
	return ret;
}

1530 /*
1531  * new snapshots need to be created at a very specific time in the
1532  * transaction commit.  This does the actual creation.
1533  *
1534  * Note:
1535  * If the error which may affect the commitment of the current transaction
1536  * happens, we should return the error number. If the error which just affect
1537  * the creation of the pending snapshots, just return 0.
1538  */
create_pending_snapshot(struct btrfs_trans_handle * trans,struct btrfs_pending_snapshot * pending)1539 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1540 				   struct btrfs_pending_snapshot *pending)
1541 {
1542 
1543 	struct btrfs_fs_info *fs_info = trans->fs_info;
1544 	struct btrfs_key key;
1545 	struct btrfs_root_item *new_root_item;
1546 	struct btrfs_root *tree_root = fs_info->tree_root;
1547 	struct btrfs_root *root = pending->root;
1548 	struct btrfs_root *parent_root;
1549 	struct btrfs_block_rsv *rsv;
1550 	struct inode *parent_inode;
1551 	struct btrfs_path *path;
1552 	struct btrfs_dir_item *dir_item;
1553 	struct dentry *dentry;
1554 	struct extent_buffer *tmp;
1555 	struct extent_buffer *old;
1556 	struct timespec64 cur_time;
1557 	int ret = 0;
1558 	u64 to_reserve = 0;
1559 	u64 index = 0;
1560 	u64 objectid;
1561 	u64 root_flags;
1562 
1563 	ASSERT(pending->path);
1564 	path = pending->path;
1565 
1566 	ASSERT(pending->root_item);
1567 	new_root_item = pending->root_item;
1568 
1569 	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
1570 	if (pending->error)
1571 		goto no_free_objectid;
1572 
1573 	/*
1574 	 * Make qgroup to skip current new snapshot's qgroupid, as it is
1575 	 * accounted by later btrfs_qgroup_inherit().
1576 	 */
1577 	btrfs_set_skip_qgroup(trans, objectid);
1578 
1579 	btrfs_reloc_pre_snapshot(pending, &to_reserve);
1580 
1581 	if (to_reserve > 0) {
1582 		pending->error = btrfs_block_rsv_add(root,
1583 						     &pending->block_rsv,
1584 						     to_reserve,
1585 						     BTRFS_RESERVE_NO_FLUSH);
1586 		if (pending->error)
1587 			goto clear_skip_qgroup;
1588 	}
1589 
1590 	key.objectid = objectid;
1591 	key.offset = (u64)-1;
1592 	key.type = BTRFS_ROOT_ITEM_KEY;
1593 
1594 	rsv = trans->block_rsv;
1595 	trans->block_rsv = &pending->block_rsv;
1596 	trans->bytes_reserved = trans->block_rsv->reserved;
1597 	trace_btrfs_space_reservation(fs_info, "transaction",
1598 				      trans->transid,
1599 				      trans->bytes_reserved, 1);
1600 	dentry = pending->dentry;
1601 	parent_inode = pending->dir;
1602 	parent_root = BTRFS_I(parent_inode)->root;
1603 	ret = record_root_in_trans(trans, parent_root, 0);
1604 	if (ret)
1605 		goto fail;
1606 	cur_time = current_time(parent_inode);
1607 
1608 	/*
1609 	 * insert the directory item
1610 	 */
1611 	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1612 	BUG_ON(ret); /* -ENOMEM */
1613 
1614 	/* check if there is a file/dir which has the same name. */
1615 	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1616 					 btrfs_ino(BTRFS_I(parent_inode)),
1617 					 dentry->d_name.name,
1618 					 dentry->d_name.len, 0);
1619 	if (dir_item != NULL && !IS_ERR(dir_item)) {
1620 		pending->error = -EEXIST;
1621 		goto dir_item_existed;
1622 	} else if (IS_ERR(dir_item)) {
1623 		ret = PTR_ERR(dir_item);
1624 		btrfs_abort_transaction(trans, ret);
1625 		goto fail;
1626 	}
1627 	btrfs_release_path(path);
1628 
1629 	/*
1630 	 * Pull in the delayed directory update
1631 	 * and the delayed inode item;
1632 	 * otherwise we corrupt the FS during
1633 	 * snapshot creation.
1634 	 */
1635 	ret = btrfs_run_delayed_items(trans);
1636 	if (ret) {	/* Transaction aborted */
1637 		btrfs_abort_transaction(trans, ret);
1638 		goto fail;
1639 	}
1640 
1641 	ret = record_root_in_trans(trans, root, 0);
1642 	if (ret) {
1643 		btrfs_abort_transaction(trans, ret);
1644 		goto fail;
1645 	}
1646 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1647 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1648 	btrfs_check_and_init_root_item(new_root_item);
1649 
1650 	root_flags = btrfs_root_flags(new_root_item);
1651 	if (pending->readonly)
1652 		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1653 	else
1654 		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1655 	btrfs_set_root_flags(new_root_item, root_flags);
1656 
1657 	btrfs_set_root_generation_v2(new_root_item,
1658 			trans->transid);
1659 	generate_random_guid(new_root_item->uuid);
1660 	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1661 			BTRFS_UUID_SIZE);
1662 	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1663 		memset(new_root_item->received_uuid, 0,
1664 		       sizeof(new_root_item->received_uuid));
1665 		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1666 		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1667 		btrfs_set_root_stransid(new_root_item, 0);
1668 		btrfs_set_root_rtransid(new_root_item, 0);
1669 	}
1670 	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1671 	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1672 	btrfs_set_root_otransid(new_root_item, trans->transid);
1673 
1674 	old = btrfs_lock_root_node(root);
1675 	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
1676 			      BTRFS_NESTING_COW);
1677 	if (ret) {
1678 		btrfs_tree_unlock(old);
1679 		free_extent_buffer(old);
1680 		btrfs_abort_transaction(trans, ret);
1681 		goto fail;
1682 	}
1683 
1684 	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1685 	/* clean up in any case */
1686 	btrfs_tree_unlock(old);
1687 	free_extent_buffer(old);
1688 	if (ret) {
1689 		btrfs_abort_transaction(trans, ret);
1690 		goto fail;
1691 	}
1692 	/* see comments in should_cow_block() */
1693 	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1694 	smp_wmb();
1695 
1696 	btrfs_set_root_node(new_root_item, tmp);
1697 	/* record when the snapshot was created in key.offset */
1698 	key.offset = trans->transid;
1699 	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1700 	btrfs_tree_unlock(tmp);
1701 	free_extent_buffer(tmp);
1702 	if (ret) {
1703 		btrfs_abort_transaction(trans, ret);
1704 		goto fail;
1705 	}
1706 
1707 	/*
1708 	 * insert root back/forward references
1709 	 */
1710 	ret = btrfs_add_root_ref(trans, objectid,
1711 				 parent_root->root_key.objectid,
1712 				 btrfs_ino(BTRFS_I(parent_inode)), index,
1713 				 dentry->d_name.name, dentry->d_name.len);
1714 	if (ret) {
1715 		btrfs_abort_transaction(trans, ret);
1716 		goto fail;
1717 	}
1718 
1719 	key.offset = (u64)-1;
1720 	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
1721 	if (IS_ERR(pending->snap)) {
1722 		ret = PTR_ERR(pending->snap);
1723 		pending->snap = NULL;
1724 		btrfs_abort_transaction(trans, ret);
1725 		goto fail;
1726 	}
1727 
1728 	ret = btrfs_reloc_post_snapshot(trans, pending);
1729 	if (ret) {
1730 		btrfs_abort_transaction(trans, ret);
1731 		goto fail;
1732 	}
1733 
1734 	/*
1735 	 * Do special qgroup accounting for the snapshot, as we use a qgroup
1736 	 * hack to make snapshot creation fast.
1737 	 * To cooperate with that hack, we do another hack here; otherwise
1738 	 * snapshots would be greatly slowed down by a subtree qgroup rescan.
1739 	 */
1740 	ret = qgroup_account_snapshot(trans, root, parent_root,
1741 				      pending->inherit, objectid);
1742 	if (ret < 0)
1743 		goto fail;
1744 
1745 	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
1746 				    dentry->d_name.len, BTRFS_I(parent_inode),
1747 				    &key, BTRFS_FT_DIR, index);
1748 	/* We checked the name at the beginning, so a collision is impossible. */
1749 	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1750 	if (ret) {
1751 		btrfs_abort_transaction(trans, ret);
1752 		goto fail;
1753 	}
1754 
1755 	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1756 					 dentry->d_name.len * 2);
1757 	parent_inode->i_mtime = parent_inode->i_ctime =
1758 		current_time(parent_inode);
1759 	ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
1760 	if (ret) {
1761 		btrfs_abort_transaction(trans, ret);
1762 		goto fail;
1763 	}
1764 	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1765 				  BTRFS_UUID_KEY_SUBVOL,
1766 				  objectid);
1767 	if (ret) {
1768 		btrfs_abort_transaction(trans, ret);
1769 		goto fail;
1770 	}
1771 	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1772 		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1773 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1774 					  objectid);
1775 		if (ret && ret != -EEXIST) {
1776 			btrfs_abort_transaction(trans, ret);
1777 			goto fail;
1778 		}
1779 	}
1780 
1781 fail:
1782 	pending->error = ret;
1783 dir_item_existed:
1784 	trans->block_rsv = rsv;
1785 	trans->bytes_reserved = 0;
1786 clear_skip_qgroup:
1787 	btrfs_clear_skip_qgroup(trans);
1788 no_free_objectid:
1789 	kfree(new_root_item);
1790 	pending->root_item = NULL;
1791 	btrfs_free_path(path);
1792 	pending->path = NULL;
1793 
1794 	return ret;
1795 }
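
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * consume the error convention documented above create_pending_snapshot().
 * A non-zero return value endangers the whole transaction commit, while an
 * error that only affects this one snapshot is parked in pending->error.
 * The helper name below is hypothetical.
 */
static int __maybe_unused example_consume_pending_error(
		struct btrfs_trans_handle *trans,
		struct btrfs_pending_snapshot *pending)
{
	int ret;

	ret = create_pending_snapshot(trans, pending);
	if (ret)
		return ret;	/* Commit-fatal: stop the commit path. */

	/* Snapshot-only failure: the transaction commit can still proceed. */
	return pending->error;
}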
1796 
1797 /*
1798  * create all the snapshots we've scheduled for creation
1799  */
1800 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1801 {
1802 	struct btrfs_pending_snapshot *pending, *next;
1803 	struct list_head *head = &trans->transaction->pending_snapshots;
1804 	int ret = 0;
1805 
1806 	list_for_each_entry_safe(pending, next, head, list) {
1807 		list_del(&pending->list);
1808 		ret = create_pending_snapshot(trans, pending);
1809 		if (ret)
1810 			break;
1811 	}
1812 	return ret;
1813 }
1814 
1815 static void update_super_roots(struct btrfs_fs_info *fs_info)
1816 {
1817 	struct btrfs_root_item *root_item;
1818 	struct btrfs_super_block *super;
1819 
1820 	super = fs_info->super_copy;
1821 
1822 	root_item = &fs_info->chunk_root->root_item;
1823 	super->chunk_root = root_item->bytenr;
1824 	super->chunk_root_generation = root_item->generation;
1825 	super->chunk_root_level = root_item->level;
1826 
1827 	root_item = &fs_info->tree_root->root_item;
1828 	super->root = root_item->bytenr;
1829 	super->generation = root_item->generation;
1830 	super->root_level = root_item->level;
1831 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
1832 		super->cache_generation = root_item->generation;
1833 	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1834 		super->cache_generation = 0;
1835 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1836 		super->uuid_tree_generation = root_item->generation;
1837 }
1838 
1839 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1840 {
1841 	struct btrfs_transaction *trans;
1842 	int ret = 0;
1843 
1844 	spin_lock(&info->trans_lock);
1845 	trans = info->running_transaction;
1846 	if (trans)
1847 		ret = (trans->state >= TRANS_STATE_COMMIT_START);
1848 	spin_unlock(&info->trans_lock);
1849 	return ret;
1850 }
1851 
1852 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1853 {
1854 	struct btrfs_transaction *trans;
1855 	int ret = 0;
1856 
1857 	spin_lock(&info->trans_lock);
1858 	trans = info->running_transaction;
1859 	if (trans)
1860 		ret = is_transaction_blocked(trans);
1861 	spin_unlock(&info->trans_lock);
1862 	return ret;
1863 }
1864 
1865 /*
1866  * commit transactions asynchronously. once btrfs_commit_transaction_async
1867  * returns, any subsequent transaction will not be allowed to join.
1868  */
1869 struct btrfs_async_commit {
1870 	struct btrfs_trans_handle *newtrans;
1871 	struct work_struct work;
1872 };
1873 
1874 static void do_async_commit(struct work_struct *work)
1875 {
1876 	struct btrfs_async_commit *ac =
1877 		container_of(work, struct btrfs_async_commit, work);
1878 
1879 	/*
1880 	 * We've got freeze protection passed with the transaction.
1881 	 * Tell lockdep about it.
1882 	 */
1883 	if (ac->newtrans->type & __TRANS_FREEZABLE)
1884 		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
1885 
1886 	current->journal_info = ac->newtrans;
1887 
1888 	btrfs_commit_transaction(ac->newtrans);
1889 	kfree(ac);
1890 }
1891 
1892 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
1893 {
1894 	struct btrfs_fs_info *fs_info = trans->fs_info;
1895 	struct btrfs_async_commit *ac;
1896 	struct btrfs_transaction *cur_trans;
1897 
1898 	ac = kmalloc(sizeof(*ac), GFP_NOFS);
1899 	if (!ac)
1900 		return -ENOMEM;
1901 
1902 	INIT_WORK(&ac->work, do_async_commit);
1903 	ac->newtrans = btrfs_join_transaction(trans->root);
1904 	if (IS_ERR(ac->newtrans)) {
1905 		int err = PTR_ERR(ac->newtrans);
1906 		kfree(ac);
1907 		return err;
1908 	}
1909 
1910 	/* take transaction reference */
1911 	cur_trans = trans->transaction;
1912 	refcount_inc(&cur_trans->use_count);
1913 
1914 	btrfs_end_transaction(trans);
1915 
1916 	/*
1917 	 * Tell lockdep we've released the freeze rwsem, since the
1918 	 * async commit thread will be the one to unlock it.
1919 	 */
1920 	if (ac->newtrans->type & __TRANS_FREEZABLE)
1921 		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);
1922 
1923 	schedule_work(&ac->work);
1924 	/*
1925 	 * Wait for the current transaction commit to start and block
1926 	 * subsequent transaction joins
1927 	 */
1928 	wait_event(fs_info->transaction_blocked_wait,
1929 		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
1930 		   TRANS_ABORTED(cur_trans));
1931 	if (current->journal_info == trans)
1932 		current->journal_info = NULL;
1933 
1934 	btrfs_put_transaction(cur_trans);
1935 	return 0;
1936 }
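
/*
 * Illustrative sketch (not part of the original file): a typical async-commit
 * caller, e.g. an async snapshot ioctl, kicks off the commit and returns as
 * soon as further joins are blocked, without waiting for the commit to
 * complete.  The function name is hypothetical.
 */
static int __maybe_unused example_async_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* Reserve metadata space for one tree item, as an example. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... queue work that must land in this transaction ... */

	/*
	 * Returns once the transaction reaches COMMIT_START (or aborts);
	 * the handle is consumed, and do_async_commit() finishes the heavy
	 * lifting in a worker.
	 */
	return btrfs_commit_transaction_async(trans);
}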
1937 
1939 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1940 {
1941 	struct btrfs_fs_info *fs_info = trans->fs_info;
1942 	struct btrfs_transaction *cur_trans = trans->transaction;
1943 
1944 	WARN_ON(refcount_read(&trans->use_count) > 1);
1945 
1946 	btrfs_abort_transaction(trans, err);
1947 
1948 	spin_lock(&fs_info->trans_lock);
1949 
1950 	/*
1951 	 * If the transaction is removed from the list, it means this
1952 	 * transaction has been committed successfully, so it is impossible
1953 	 * to call the cleanup function.
1954 	 */
1955 	BUG_ON(list_empty(&cur_trans->list));
1956 
1957 	if (cur_trans == fs_info->running_transaction) {
1958 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
1959 		spin_unlock(&fs_info->trans_lock);
1960 		wait_event(cur_trans->writer_wait,
1961 			   atomic_read(&cur_trans->num_writers) == 1);
1962 
1963 		spin_lock(&fs_info->trans_lock);
1964 	}
1965 
1966 	/*
1967 	 * Now that we know no one else is still using the transaction we can
1968 	 * remove the transaction from the list of transactions. This avoids
1969 	 * the transaction kthread from cleaning up the transaction while some
1970 	 * other task is still using it, which could result in a use-after-free
1971 	 * on things like log trees, as it forces the transaction kthread to
1972 	 * wait for this transaction to be cleaned up by us.
1973 	 */
1974 	list_del_init(&cur_trans->list);
1975 
1976 	spin_unlock(&fs_info->trans_lock);
1977 
1978 	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1979 
1980 	spin_lock(&fs_info->trans_lock);
1981 	if (cur_trans == fs_info->running_transaction)
1982 		fs_info->running_transaction = NULL;
1983 	spin_unlock(&fs_info->trans_lock);
1984 
1985 	if (trans->type & __TRANS_FREEZABLE)
1986 		sb_end_intwrite(fs_info->sb);
1987 	btrfs_put_transaction(cur_trans);
1988 	btrfs_put_transaction(cur_trans);
1989 
1990 	trace_btrfs_transaction_commit(trans->root);
1991 
1992 	if (current->journal_info == trans)
1993 		current->journal_info = NULL;
1994 	btrfs_scrub_cancel(fs_info);
1995 
1996 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
1997 }
1998 
1999 /*
2000  * Release reserved delayed ref space of all pending block groups of the
2001  * transaction and remove them from the list
2002  */
2003 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2004 {
2005 	struct btrfs_fs_info *fs_info = trans->fs_info;
2006 	struct btrfs_block_group *block_group, *tmp;
2007 
2008 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
2009 		btrfs_delayed_refs_rsv_release(fs_info, 1);
2010 		list_del_init(&block_group->bg_list);
2011 	}
2012 }
2013 
2014 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2015 {
2016 	/*
2017 	 * We use writeback_inodes_sb here because if we used
2018 	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2019 	 * We are currently holding the fs freeze lock; if we did an async
2020 	 * flush we'd call btrfs_join_transaction() and deadlock because we'd
2021 	 * need to wait for the fs freeze lock.  With direct flushing we
2022 	 * benefit from already being in a transaction, and our
2023 	 * join_transaction doesn't have to re-take the fs freeze lock.
2024 	 */
2025 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2026 		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2027 	return 0;
2028 }
2029 
2030 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2031 {
2032 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2033 		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2034 }
2035 
2036 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2037 {
2038 	struct btrfs_fs_info *fs_info = trans->fs_info;
2039 	struct btrfs_transaction *cur_trans = trans->transaction;
2040 	struct btrfs_transaction *prev_trans = NULL;
2041 	int ret;
2042 
2043 	ASSERT(refcount_read(&trans->use_count) == 1);
2044 
2045 	/* Stop the commit early if ->aborted is set */
2046 	if (TRANS_ABORTED(cur_trans)) {
2047 		ret = cur_trans->aborted;
2048 		btrfs_end_transaction(trans);
2049 		return ret;
2050 	}
2051 
2052 	btrfs_trans_release_metadata(trans);
2053 	trans->block_rsv = NULL;
2054 
2055 	/*
2056 	 * We only want one transaction commit doing the flushing so we do not
2057 	 * waste a bunch of time on lock contention on the extent root node.
2058 	 */
2059 	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
2060 			      &cur_trans->delayed_refs.flags)) {
2061 		/*
2062 		 * Make a pass through all the delayed refs we have so far.
2063 		 * Any running threads may add more while we are here.
2064 		 */
2065 		ret = btrfs_run_delayed_refs(trans, 0);
2066 		if (ret) {
2067 			btrfs_end_transaction(trans);
2068 			return ret;
2069 		}
2070 	}
2071 
2072 	btrfs_create_pending_block_groups(trans);
2073 
2074 	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2075 		int run_it = 0;
2076 
2077 		/* this mutex is also taken before trying to set
2078 		 * block groups readonly.  We need to make sure
2079 		 * that nobody has set a block group readonly
2080 		 * after extents from that block group have been
2081 		 * allocated for cache files.  btrfs_set_block_group_ro
2082 		 * will wait for the transaction to commit if it
2083 		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2084 		 *
2085 		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2086 		 * only one process starts all the block group IO.  It wouldn't
2087 		 * hurt to have more than one go through, but there's no
2088 		 * real advantage to it either.
2089 		 */
2090 		mutex_lock(&fs_info->ro_block_group_mutex);
2091 		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2092 				      &cur_trans->flags))
2093 			run_it = 1;
2094 		mutex_unlock(&fs_info->ro_block_group_mutex);
2095 
2096 		if (run_it) {
2097 			ret = btrfs_start_dirty_block_groups(trans);
2098 			if (ret) {
2099 				btrfs_end_transaction(trans);
2100 				return ret;
2101 			}
2102 		}
2103 	}
2104 
2105 	spin_lock(&fs_info->trans_lock);
2106 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2107 		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2108 
2109 		spin_unlock(&fs_info->trans_lock);
2110 		refcount_inc(&cur_trans->use_count);
2111 
2112 		if (trans->in_fsync)
2113 			want_state = TRANS_STATE_SUPER_COMMITTED;
2114 		ret = btrfs_end_transaction(trans);
2115 		wait_for_commit(cur_trans, want_state);
2116 
2117 		if (TRANS_ABORTED(cur_trans))
2118 			ret = cur_trans->aborted;
2119 
2120 		btrfs_put_transaction(cur_trans);
2121 
2122 		return ret;
2123 	}
2124 
2125 	cur_trans->state = TRANS_STATE_COMMIT_START;
2126 	wake_up(&fs_info->transaction_blocked_wait);
2127 
2128 	if (cur_trans->list.prev != &fs_info->trans_list) {
2129 		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2130 
2131 		if (trans->in_fsync)
2132 			want_state = TRANS_STATE_SUPER_COMMITTED;
2133 
2134 		prev_trans = list_entry(cur_trans->list.prev,
2135 					struct btrfs_transaction, list);
2136 		if (prev_trans->state < want_state) {
2137 			refcount_inc(&prev_trans->use_count);
2138 			spin_unlock(&fs_info->trans_lock);
2139 
2140 			wait_for_commit(prev_trans, want_state);
2141 
2142 			ret = READ_ONCE(prev_trans->aborted);
2143 
2144 			btrfs_put_transaction(prev_trans);
2145 			if (ret)
2146 				goto cleanup_transaction;
2147 		} else {
2148 			spin_unlock(&fs_info->trans_lock);
2149 		}
2150 	} else {
2151 		spin_unlock(&fs_info->trans_lock);
2152 		/*
2153 		 * The previous transaction was aborted and was already removed
2154 		 * from the list of transactions at fs_info->trans_list. So we
2155 		 * abort to prevent writing a new superblock that reflects a
2156 		 * corrupt state (pointing to trees with unwritten nodes/leaves).
2157 		 */
2158 		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
2159 			ret = -EROFS;
2160 			goto cleanup_transaction;
2161 		}
2162 	}
2163 
2164 	extwriter_counter_dec(cur_trans, trans->type);
2165 
2166 	ret = btrfs_start_delalloc_flush(fs_info);
2167 	if (ret)
2168 		goto cleanup_transaction;
2169 
2170 	ret = btrfs_run_delayed_items(trans);
2171 	if (ret)
2172 		goto cleanup_transaction;
2173 
2174 	wait_event(cur_trans->writer_wait,
2175 		   extwriter_counter_read(cur_trans) == 0);
2176 
2177 	/* Some pending items might have been added after the previous flush. */
2178 	ret = btrfs_run_delayed_items(trans);
2179 	if (ret)
2180 		goto cleanup_transaction;
2181 
2182 	btrfs_wait_delalloc_flush(fs_info);
2183 
2184 	/*
2185 	 * Wait for all ordered extents started by a fast fsync that joined this
2186 	 * transaction. Otherwise if this transaction commits before the ordered
2187 	 * extents complete we lose logged data after a power failure.
2188 	 */
2189 	wait_event(cur_trans->pending_wait,
2190 		   atomic_read(&cur_trans->pending_ordered) == 0);
2191 
2192 	btrfs_scrub_pause(fs_info);
2193 	/*
2194 	 * Ok now we need to make sure to block out any other joins while we
2195 	 * commit the transaction.  We could have started a join before setting
2196 	 * COMMIT_DOING, so make sure to wait for num_writers to reach 1 again.
2197 	 */
2198 	spin_lock(&fs_info->trans_lock);
2199 	cur_trans->state = TRANS_STATE_COMMIT_DOING;
2200 	spin_unlock(&fs_info->trans_lock);
2201 	wait_event(cur_trans->writer_wait,
2202 		   atomic_read(&cur_trans->num_writers) == 1);
2203 
2204 	if (TRANS_ABORTED(cur_trans)) {
2205 		ret = cur_trans->aborted;
2206 		goto scrub_continue;
2207 	}
2208 	/*
2209 	 * the reloc mutex makes sure that we stop
2210 	 * the balancing code from coming in and moving
2211 	 * extents around in the middle of the commit
2212 	 */
2213 	mutex_lock(&fs_info->reloc_mutex);
2214 
2215 	/*
2216 	 * We needn't worry about the delayed items because we will
2217 	 * deal with them in create_pending_snapshot(), which is the
2218 	 * core function of the snapshot creation.
2219 	 */
2220 	ret = create_pending_snapshots(trans);
2221 	if (ret)
2222 		goto unlock_reloc;
2223 
2224 	/*
2225 	 * We insert the dir indexes of the snapshots and update the inode
2226 	 * of the snapshots' parents after the snapshot creation, so there
2227 	 * are some delayed items which are not dealt with. Now deal with
2228 	 * them.
2229 	 *
2230 	 * We needn't worry that this operation will corrupt the snapshots,
2231 	 * because all the trees which are snapshotted will be forced to COW
2232 	 * their nodes and leaves.
2233 	 */
2234 	ret = btrfs_run_delayed_items(trans);
2235 	if (ret)
2236 		goto unlock_reloc;
2237 
2238 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2239 	if (ret)
2240 		goto unlock_reloc;
2241 
2242 	/*
2243 	 * make sure none of the code above managed to slip in a
2244 	 * delayed item
2245 	 */
2246 	btrfs_assert_delayed_root_empty(fs_info);
2247 
2248 	WARN_ON(cur_trans != trans->transaction);
2249 
2250 	/* commit_fs_roots() and commit_cowonly_roots() below get the
2251 	 * various roots consistent with each other.  Every pointer
2252 	 * in the tree of tree roots has to point to the most up to date
2253 	 * root for every subvolume and other tree.  So, we have to keep
2254 	 * the tree logging code from jumping in and changing any
2255 	 * of the trees.
2256 	 *
2257 	 * At this point in the commit, there can't be any tree-log
2258 	 * writers, but a little lower down we drop the trans mutex
2259 	 * and let new people in.  By holding the tree_log_mutex
2260 	 * from now until after the super is written, we avoid races
2261 	 * with the tree-log code.
2262 	 */
2263 	mutex_lock(&fs_info->tree_log_mutex);
2264 
2265 	ret = commit_fs_roots(trans);
2266 	if (ret)
2267 		goto unlock_tree_log;
2268 
2269 	/*
2270 	 * Since the transaction is done, we can apply the pending changes
2271 	 * before the next transaction.
2272 	 */
2273 	btrfs_apply_pending_changes(fs_info);
2274 
2275 	/* commit_fs_roots gets rid of all the tree log roots; it is now
2276 	 * safe to free the tree of log tree roots.
2277 	 */
2278 	btrfs_free_log_root_tree(trans, fs_info);
2279 
2280 	/*
2281 	 * Since the fs roots are all committed, we can get quite accurate
2282 	 * new_roots, so let's do the quota accounting.
2283 	 */
2284 	ret = btrfs_qgroup_account_extents(trans);
2285 	if (ret < 0)
2286 		goto unlock_tree_log;
2287 
2288 	ret = commit_cowonly_roots(trans);
2289 	if (ret)
2290 		goto unlock_tree_log;
2291 
2292 	/*
2293 	 * The tasks which save the space cache and inode cache may also
2294 	 * update ->aborted, check it.
2295 	 */
2296 	if (TRANS_ABORTED(cur_trans)) {
2297 		ret = cur_trans->aborted;
2298 		goto unlock_tree_log;
2299 	}
2300 
2301 	cur_trans = fs_info->running_transaction;
2302 
2303 	btrfs_set_root_node(&fs_info->tree_root->root_item,
2304 			    fs_info->tree_root->node);
2305 	list_add_tail(&fs_info->tree_root->dirty_list,
2306 		      &cur_trans->switch_commits);
2307 
2308 	btrfs_set_root_node(&fs_info->chunk_root->root_item,
2309 			    fs_info->chunk_root->node);
2310 	list_add_tail(&fs_info->chunk_root->dirty_list,
2311 		      &cur_trans->switch_commits);
2312 
2313 	switch_commit_roots(trans);
2314 
2315 	ASSERT(list_empty(&cur_trans->dirty_bgs));
2316 	ASSERT(list_empty(&cur_trans->io_bgs));
2317 	update_super_roots(fs_info);
2318 
2319 	btrfs_set_super_log_root(fs_info->super_copy, 0);
2320 	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2321 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2322 	       sizeof(*fs_info->super_copy));
2323 
2324 	btrfs_commit_device_sizes(cur_trans);
2325 
2326 	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2327 	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2328 
2329 	btrfs_trans_release_chunk_metadata(trans);
2330 
2331 	spin_lock(&fs_info->trans_lock);
2332 	cur_trans->state = TRANS_STATE_UNBLOCKED;
2333 	fs_info->running_transaction = NULL;
2334 	spin_unlock(&fs_info->trans_lock);
2335 	mutex_unlock(&fs_info->reloc_mutex);
2336 
2337 	wake_up(&fs_info->transaction_wait);
2338 
2339 	ret = btrfs_write_and_wait_transaction(trans);
2340 	if (ret) {
2341 		btrfs_handle_fs_error(fs_info, ret,
2342 				      "Error while writing out transaction");
2343 		/*
2344 		 * reloc_mutex has been unlocked; tree_log_mutex is still held,
2345 		 * but jumping to unlock_tree_log would unlock reloc_mutex twice.
2346 		 */
2347 		mutex_unlock(&fs_info->tree_log_mutex);
2348 		goto scrub_continue;
2349 	}
2350 
2351 	/*
2352 	 * At this point, we should have written all the tree blocks allocated
2353 	 * in this transaction. So it's now safe to free the redirtied extent
2354 	 * buffers.
2355 	 */
2356 	btrfs_free_redirty_list(cur_trans);
2357 
2358 	ret = write_all_supers(fs_info, 0);
2359 	/*
2360 	 * the super is written, we can safely allow the tree-loggers
2361 	 * to go about their business
2362 	 */
2363 	mutex_unlock(&fs_info->tree_log_mutex);
2364 	if (ret)
2365 		goto scrub_continue;
2366 
2367 	/*
2368 	 * We needn't acquire the lock here because there is no other task
2369 	 * which can change it.
2370 	 */
2371 	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
2372 	wake_up(&cur_trans->commit_wait);
2373 
2374 	btrfs_finish_extent_commit(trans);
2375 
2376 	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2377 		btrfs_clear_space_info_full(fs_info);
2378 
2379 	fs_info->last_trans_committed = cur_trans->transid;
2380 	/*
2381 	 * We needn't acquire the lock here because there is no other task
2382 	 * which can change it.
2383 	 */
2384 	cur_trans->state = TRANS_STATE_COMPLETED;
2385 	wake_up(&cur_trans->commit_wait);
2386 
2387 	spin_lock(&fs_info->trans_lock);
2388 	list_del_init(&cur_trans->list);
2389 	spin_unlock(&fs_info->trans_lock);
2390 
2391 	btrfs_put_transaction(cur_trans);
2392 	btrfs_put_transaction(cur_trans);
2393 
2394 	if (trans->type & __TRANS_FREEZABLE)
2395 		sb_end_intwrite(fs_info->sb);
2396 
2397 	trace_btrfs_transaction_commit(trans->root);
2398 
2399 	btrfs_scrub_continue(fs_info);
2400 
2401 	if (current->journal_info == trans)
2402 		current->journal_info = NULL;
2403 
2404 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2405 
2406 	return ret;
2407 
2408 unlock_tree_log:
2409 	mutex_unlock(&fs_info->tree_log_mutex);
2410 unlock_reloc:
2411 	mutex_unlock(&fs_info->reloc_mutex);
2412 scrub_continue:
2413 	btrfs_scrub_continue(fs_info);
2414 cleanup_transaction:
2415 	btrfs_trans_release_metadata(trans);
2416 	btrfs_cleanup_pending_block_groups(trans);
2417 	btrfs_trans_release_chunk_metadata(trans);
2418 	trans->block_rsv = NULL;
2419 	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2420 	if (current->journal_info == trans)
2421 		current->journal_info = NULL;
2422 	cleanup_transaction(trans, ret);
2423 
2424 	return ret;
2425 }
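
/*
 * Illustrative sketch (not part of the original file): the canonical
 * synchronous usage pattern.  btrfs_commit_transaction() consumes the handle,
 * so the caller must not touch it afterwards.  The function name is
 * hypothetical.
 */
static int __maybe_unused example_sync_commit(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* Reserve metadata space for one tree item, as an example. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify trees under this handle ... */

	/* Waits until the transaction is fully committed (or aborted). */
	return btrfs_commit_transaction(trans);
}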
2426 
2427 /*
2428  * Return 0 if there are no more dead roots at the time of the call, or
2429  * 1 if there are more to be processed (call again). Errors while dropping
2430  * a snapshot are not propagated and also result in 0.
2431  *
2432  * A return value of 1 means there are certainly more snapshots to delete,
2433  * but if a new one appears during processing, 0 may be returned. We don't
2434  * mind, because btrfs_commit_super will poke the cleaner thread and it
2435  * will process it a few seconds later.
2436  */
2437 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2438 {
2439 	int ret;
2440 	struct btrfs_fs_info *fs_info = root->fs_info;
2441 
2442 	spin_lock(&fs_info->trans_lock);
2443 	if (list_empty(&fs_info->dead_roots)) {
2444 		spin_unlock(&fs_info->trans_lock);
2445 		return 0;
2446 	}
2447 	root = list_first_entry(&fs_info->dead_roots,
2448 			struct btrfs_root, root_list);
2449 	list_del_init(&root->root_list);
2450 	spin_unlock(&fs_info->trans_lock);
2451 
2452 	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2453 
2454 	btrfs_kill_all_delayed_nodes(root);
2455 
2456 	if (btrfs_header_backref_rev(root->node) <
2457 			BTRFS_MIXED_BACKREF_REV)
2458 		ret = btrfs_drop_snapshot(root, 0, 0);
2459 	else
2460 		ret = btrfs_drop_snapshot(root, 1, 0);
2461 
2462 	btrfs_put_root(root);
2463 	return (ret < 0) ? 0 : 1;
2464 }
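
/*
 * Illustrative sketch (not part of the original file): the cleaner thread is
 * expected to call btrfs_clean_one_deleted_snapshot() in a loop until it
 * returns 0.  The function name is hypothetical.
 */
static void __maybe_unused example_clean_dead_roots(struct btrfs_root *root)
{
	/* Drop one dead root per iteration until none are left. */
	while (btrfs_clean_one_deleted_snapshot(root) == 1)
		cond_resched();
}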
2465 
2466 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2467 {
2468 	unsigned long prev;
2469 	unsigned long bit;
2470 
2471 	prev = xchg(&fs_info->pending_changes, 0);
2472 	if (!prev)
2473 		return;
2474 
2475 	bit = 1 << BTRFS_PENDING_COMMIT;
2476 	if (prev & bit)
2477 		btrfs_debug(fs_info, "pending commit done");
2478 	prev &= ~bit;
2479 
2480 	if (prev)
2481 		btrfs_warn(fs_info,
2482 			"unknown pending changes left 0x%lx, ignoring", prev);
2483 }
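
/*
 * Illustrative sketch (not part of the original file): pending changes are
 * queued by setting a bit in fs_info->pending_changes.  The real code goes
 * through helper macros (assumption), but the effect is equivalent to the
 * sketch below; btrfs_apply_pending_changes() then consumes all bits at the
 * next commit via the xchg() above.  The function name is hypothetical.
 */
static void __maybe_unused example_queue_pending_commit(
		struct btrfs_fs_info *fs_info)
{
	/* Ask the next transaction commit to note the pending commit bit. */
	set_bit(BTRFS_PENDING_COMMIT, &fs_info->pending_changes);
}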
2484