/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
#include "misc.h"

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_PREP,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (TRANS_START/TRANS_ATTACH) in this
	 * transaction. It must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction. It must be zero before
	 * the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock when changing it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;
	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - this function can only ever be
	 *   run by one of the transaction committers. Refer to the
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction.
	 *
	 * - btrfs_write_dirty_block_groups - this is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING).
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort.
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with free
	 * space cache writeout. This mutex keeps them from stomping on each
	 * other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};

enum {
	ENUM_BIT(__TRANS_FREEZABLE),
	ENUM_BIT(__TRANS_START),
	ENUM_BIT(__TRANS_ATTACH),
	ENUM_BIT(__TRANS_JOIN),
	ENUM_BIT(__TRANS_JOIN_NOLOCK),
	ENUM_BIT(__TRANS_DUMMY),
	ENUM_BIT(__TRANS_JOIN_NOSTART),
};

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
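
/*
 * Informal sketch (hedged, not copied from transaction.c): a handle records
 * the bits it was opened with in btrfs_trans_handle::type, so callers can
 * test them directly. For example, only freezable handles hold intwrite
 * freeze protection, roughly along the lines of:
 *
 *	if (trans->type & __TRANS_FREEZABLE)
 *		sb_end_intwrite(trans->fs_info->sb);
 */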

struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	refcount_t use_count;
	unsigned int type;
	/*
	 * Error code of a transaction abort. It is set outside of locks, so
	 * accesses must use READ_ONCE()/WRITE_ONCE().
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
};

/*
 * The abort status can be changed between calls and is not protected by locks.
 * This accepts both btrfs_transaction and btrfs_trans_handle as the type. Once
 * it's set to a non-zero value it does not change, so the macro should be used
 * for checks, but it is not necessary for further reads of the value.
 */
#define TRANS_ABORTED(trans)	(unlikely(READ_ONCE((trans)->aborted)))
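
/*
 * Informal usage sketch (hedged, not taken from a specific caller): the macro
 * works on either a handle or a transaction and is typically used to bail out
 * early once someone else has aborted the transaction, e.g.:
 *
 *	if (TRANS_ABORTED(trans))
 *		return -EROFS;
 */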

struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* Block reservation for the operation. */
	struct btrfs_block_rsv block_rsv;
	/* Error code of the snapshot creation. */
	int error;
	/* Preallocated anonymous block device number. */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};

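/*
 * Informal note on the helper below (hedged): it is expected to be called
 * after an inode was modified in the given transaction, recording the transid
 * and the current log sub-transaction so that a later fsync can tell whether
 * the inode still needs to be logged or the transaction committed.
 */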
static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_trans = trans->transaction->transid;
	inode->last_sub_trans = inode->root->log_transid;
	inode->last_log_commit = inode->last_sub_trans - 1;
	spin_unlock(&inode->lock);
}

/*
 * Make the qgroup code skip the given qgroupid, which means the old/new_roots
 * for the qgroup accounting won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}

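/*
 * Informal usage sketch (hedged; the surrounding operation is a placeholder,
 * not taken from a specific caller): the two helpers above are meant to be
 * paired around the work whose delayed refs must not be accounted to the
 * given qgroup:
 *
 *	btrfs_set_skip_qgroup(trans, objectid);
 *	... queue the delayed refs that should skip this qgroup ...
 *	btrfs_clear_skip_qgroup(trans);
 */
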
bool __cold abort_should_print_stack(int errno);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, so that the exact stack trace is reported for some errors.
 */
#define btrfs_abort_transaction(trans, errno)		\
do {								\
	bool first = false;					\
	/* Report the first abort since mount. */		\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((trans)->fs_info->fs_state))) {	\
		first = true;					\
		if (WARN(abort_should_print_stack(errno),	\
			KERN_ERR				\
			"BTRFS: Transaction aborted (error %d)\n",	\
			(errno))) {				\
			/* Stack trace printed. */		\
		} else {					\
			btrfs_err((trans)->fs_info,		\
				  "Transaction aborted (error %d)", \
				  (errno));			\
		}						\
	}							\
	__btrfs_abort_transaction((trans), __func__,		\
				  __LINE__, (errno), first);	\
} while (0)
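
/*
 * Informal usage sketch (hedged; my_update_item() is a made-up helper, not a
 * real btrfs function): abort at the failure site so the reported function
 * and line point at the real culprit, then unwind as usual:
 *
 *	ret = my_update_item(trans, root);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		goto out;
 *	}
 */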

int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

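/*
 * Informal lifecycle sketch (hedged; my_do_work() is a made-up helper and the
 * single reserved item is only an example): start a handle, do the work, then
 * either commit it or end it, aborting on error:
 *
 *	struct btrfs_trans_handle *trans;
 *	int ret;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *
 *	ret = my_do_work(trans, root);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		btrfs_end_transaction(trans);
 *		return ret;
 *	}
 *	return btrfs_commit_transaction(trans);
 */
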
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int errno, bool first_hit);

int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);

#endif