
Searched refs:delayed_refs (Results 1 – 10 of 10) sorted by relevance

/Linux-v5.4/fs/btrfs/

delayed-ref.c
56 atomic_read(&trans->transaction->delayed_refs.num_entries); in btrfs_should_throttle_delayed_refs()
394 int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs, in btrfs_delayed_ref_lock() argument
397 lockdep_assert_held(&delayed_refs->lock); in btrfs_delayed_ref_lock()
402 spin_unlock(&delayed_refs->lock); in btrfs_delayed_ref_lock()
405 spin_lock(&delayed_refs->lock); in btrfs_delayed_ref_lock()
416 struct btrfs_delayed_ref_root *delayed_refs, in drop_delayed_ref() argument
427 atomic_dec(&delayed_refs->num_entries); in drop_delayed_ref()
431 struct btrfs_delayed_ref_root *delayed_refs, in merge_ref() argument
460 drop_delayed_ref(trans, delayed_refs, head, next); in merge_ref()
463 drop_delayed_ref(trans, delayed_refs, head, ref); in merge_ref()
[all …]
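
The delayed-ref.c hits above center on btrfs_delayed_ref_lock(), which must be entered with delayed_refs->lock held (the lockdep assertion at line 397) and may drop and retake that spinlock (lines 402/405) while it sleeps on head->mutex. A minimal caller-side sketch of that contract; lock_one_head() is a hypothetical wrapper used only for illustration, not a function from the btrfs source:

static int lock_one_head(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        int ret;

        spin_lock(&delayed_refs->lock);
        /* May drop and retake delayed_refs->lock while waiting for
         * head->mutex; a non-zero return means the head was already run
         * and freed, so the caller should move on or retry. */
        ret = btrfs_delayed_ref_lock(delayed_refs, head);
        spin_unlock(&delayed_refs->lock);
        if (ret)
                return ret;

        /* ... head->mutex is now held; process or drop its refs ... */

        mutex_unlock(&head->mutex);
        return 0;
}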

transaction.h
86 struct btrfs_delayed_ref_root delayed_refs; member
163 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_set_skip_qgroup() local
165 delayed_refs = &trans->transaction->delayed_refs; in btrfs_set_skip_qgroup()
166 WARN_ON(delayed_refs->qgroup_to_skip); in btrfs_set_skip_qgroup()
167 delayed_refs->qgroup_to_skip = qgroupid; in btrfs_set_skip_qgroup()
172 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_clear_skip_qgroup() local
174 delayed_refs = &trans->transaction->delayed_refs; in btrfs_clear_skip_qgroup()
175 WARN_ON(!delayed_refs->qgroup_to_skip); in btrfs_clear_skip_qgroup()
176 delayed_refs->qgroup_to_skip = 0; in btrfs_clear_skip_qgroup()
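
Lines 163-176 of transaction.h are the bodies of the two inline qgroup-skip helpers. A reconstruction assembled from the excerpted lines; the signatures and braces are filled in from context and may differ slightly from the original source:

static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
                                         u64 qgroupid)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        WARN_ON(delayed_refs->qgroup_to_skip);
        delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        WARN_ON(!delayed_refs->qgroup_to_skip);
        delayed_refs->qgroup_to_skip = 0;
}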

delayed-ref.h
347 struct btrfs_delayed_ref_root *delayed_refs,
351 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
353 int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
359 void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
363 struct btrfs_delayed_ref_root *delayed_refs);

extent-tree.c
155 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_lookup_extent_info() local
236 delayed_refs = &trans->transaction->delayed_refs; in btrfs_lookup_extent_info()
237 spin_lock(&delayed_refs->lock); in btrfs_lookup_extent_info()
238 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); in btrfs_lookup_extent_info()
242 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
265 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
1731 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, in unselect_delayed_ref_head() argument
1734 spin_lock(&delayed_refs->lock); in unselect_delayed_ref_head()
1736 delayed_refs->num_heads_ready++; in unselect_delayed_ref_head()
1737 spin_unlock(&delayed_refs->lock); in unselect_delayed_ref_head()
[all …]
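
The extent-tree.c hits show the common lookup pattern: a delayed ref head returned by btrfs_find_delayed_ref_head() is only safe to touch while delayed_refs->lock is held. A minimal sketch of that pattern; peek_delayed_head() is a hypothetical helper, and the head->mutex trylock/retry handling that btrfs_lookup_extent_info() actually performs is omitted:

static void peek_delayed_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (!head) {
                spin_unlock(&delayed_refs->lock);
                return;
        }
        /* ... inspect head (e.g. head->ref_mod) under the lock ... */
        spin_unlock(&delayed_refs->lock);
}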

transaction.c
53 &transaction->delayed_refs.href_root.rb_root)); in btrfs_put_transaction()
54 if (transaction->delayed_refs.pending_csums) in btrfs_put_transaction()
57 transaction->delayed_refs.pending_csums); in btrfs_put_transaction()
233 memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs)); in join_transaction()
235 cur_trans->delayed_refs.href_root = RB_ROOT_CACHED; in join_transaction()
236 cur_trans->delayed_refs.dirty_extent_root = RB_ROOT; in join_transaction()
237 atomic_set(&cur_trans->delayed_refs.num_entries, 0); in join_transaction()
250 spin_lock_init(&cur_trans->delayed_refs.lock); in join_transaction()
802 cur_trans->delayed_refs.flushing) in btrfs_should_end_transaction()
1977 cur_trans->delayed_refs.flushing = 1; in btrfs_commit_transaction()
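
Taken together, the transaction.c hits trace the lifetime of the per-transaction delayed_refs member. A sketch in lifecycle order, abbreviated from the excerpted lines; intervening fields and error paths are not shown:

/* join_transaction() (lines 233-250): zero the ref root and set up its
 * rbtrees, counter and spinlock for the new transaction. */
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
atomic_set(&cur_trans->delayed_refs.num_entries, 0);
spin_lock_init(&cur_trans->delayed_refs.lock);

/* btrfs_commit_transaction() (line 1977): mark the root as flushing, which
 * btrfs_should_end_transaction() (line 802) checks to tell joiners to stop. */
cur_trans->delayed_refs.flushing = 1;

/* btrfs_put_transaction() (lines 53-57): by teardown the href_root rbtree
 * must be empty and no csum deletions may still be pending. */
WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root.rb_root));
if (transaction->delayed_refs.pending_csums) {
        /* the original reports the leftover pending_csums byte count here */
}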

qgroup.c
1544 struct btrfs_delayed_ref_root *delayed_refs, in btrfs_qgroup_trace_extent_nolock() argument
1547 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; in btrfs_qgroup_trace_extent_nolock()
1552 lockdep_assert_held(&delayed_refs->lock); in btrfs_qgroup_trace_extent_nolock()
1574 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); in btrfs_qgroup_trace_extent_nolock()
1610 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_qgroup_trace_extent() local
1620 delayed_refs = &trans->transaction->delayed_refs; in btrfs_qgroup_trace_extent()
1625 spin_lock(&delayed_refs->lock); in btrfs_qgroup_trace_extent()
1626 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); in btrfs_qgroup_trace_extent()
1627 spin_unlock(&delayed_refs->lock); in btrfs_qgroup_trace_extent()
2506 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_qgroup_account_extents() local
[all …]
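
The qgroup.c hits pair a _nolock insert helper with its locked caller: btrfs_qgroup_trace_extent_nolock() asserts delayed_refs->lock via lockdep (line 1552) before linking the record into delayed_refs->dirty_extent_root, so btrfs_qgroup_trace_extent() allocates the record first and only takes the spinlock around the insertion. An abbreviated fragment of that caller; the allocation call is filled in from the surrounding v5.4 source, and field setup plus the "already recorded" return path are elided:

record = kzalloc(sizeof(*record), gfp_flag);
if (!record)
        return -ENOMEM;
/* ... fill record->bytenr and record->num_bytes ... */

delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);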

backref.c
1121 struct btrfs_delayed_ref_root *delayed_refs = NULL; local
1177 delayed_refs = &trans->transaction->delayed_refs;
1178 spin_lock(&delayed_refs->lock);
1179 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1183 spin_unlock(&delayed_refs->lock);
1196 spin_unlock(&delayed_refs->lock);
1203 spin_unlock(&delayed_refs->lock);
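
The repeated unlock sites in the backref.c excerpt come from the usual contended-head pattern: if head->mutex cannot be taken immediately, the head is pinned, the spinlock dropped, and the whole lookup retried once the current holder finishes. A simplified sketch of that pattern, with the work done on a successfully locked head left as a comment:

again:
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (!head) {
                spin_unlock(&delayed_refs->lock);
        } else if (!mutex_trylock(&head->mutex)) {
                /* Contended: pin the head, drop the spinlock, wait for the
                 * holder to finish, then start the lookup over. */
                refcount_inc(&head->refs);
                spin_unlock(&delayed_refs->lock);
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                goto again;
        } else {
                spin_unlock(&delayed_refs->lock);
                /* ... add this head's delayed refs to the lookup result ... */
                mutex_unlock(&head->mutex);
        }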

qgroup.h
266 struct btrfs_delayed_ref_root *delayed_refs,

disk-io.c
4250 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_destroy_delayed_refs() local
4254 delayed_refs = &trans->delayed_refs; in btrfs_destroy_delayed_refs()
4256 spin_lock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
4257 if (atomic_read(&delayed_refs->num_entries) == 0) { in btrfs_destroy_delayed_refs()
4258 spin_unlock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
4263 while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { in btrfs_destroy_delayed_refs()
4270 if (btrfs_delayed_ref_lock(delayed_refs, head)) in btrfs_destroy_delayed_refs()
4282 atomic_dec(&delayed_refs->num_entries); in btrfs_destroy_delayed_refs()
4288 btrfs_delete_ref_head(delayed_refs, head); in btrfs_destroy_delayed_refs()
4290 spin_unlock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
[all …]
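
The disk-io.c hits are the teardown loop in btrfs_destroy_delayed_refs(), which drains every remaining head from the href_root rbtree during error cleanup. A skeleton of that loop assembled from the excerpted lines; the per-ref drop and the space accounting between them are elided:

delayed_refs = &trans->delayed_refs;

spin_lock(&delayed_refs->lock);
while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
        head = rb_entry(node, struct btrfs_delayed_ref_head, href_node);

        /* May sleep on head->mutex, dropping delayed_refs->lock meanwhile;
         * non-zero means the head vanished, so pick the next one. */
        if (btrfs_delayed_ref_lock(delayed_refs, head))
                continue;

        /* ... erase each ref queued on this head, decrementing
         *     delayed_refs->num_entries as at line 4282 ... */

        btrfs_delete_ref_head(delayed_refs, head);
        spin_unlock(&delayed_refs->lock);       /* line 4290 */

        /* ... unlock head->mutex, pin/account its space, drop the head,
         *     then retake delayed_refs->lock for the next iteration ... */
        spin_lock(&delayed_refs->lock);
}
spin_unlock(&delayed_refs->lock);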

ctree.h
2404 struct btrfs_delayed_ref_root *delayed_refs,