1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "misc.h"
4 #include "ctree.h"
5 #include "space-info.h"
6 #include "sysfs.h"
7 #include "volumes.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
10 #include "transaction.h"
11 #include "block-group.h"
12 #include "zoned.h"
13
14 /*
15 * HOW DOES SPACE RESERVATION WORK
16 *
17 * If you want to know about delalloc specifically, there is a separate comment
18 * for that with the delalloc code. This comment is about how the whole system
19 * works generally.
20 *
21 * BASIC CONCEPTS
22 *
23 * 1) space_info. This is the ultimate arbiter of how much space we can use.
24 * There's a description of the bytes_ fields with the struct declaration,
25 * refer to that for specifics on each field. Suffice it to say that for
26 * reservations we care about total_bytes - SUM(space_info->bytes_) when
27 * determining if there is space to make an allocation. There is a space_info
28 * for METADATA, SYSTEM, and DATA areas.
29 *
30 * 2) block_rsv's. These are basically buckets for every different type of
31 * metadata reservation we have. You can see the comment in the block_rsv
32 * code on the rules for each type, but generally block_rsv->reserved is how
33 * much space is accounted for in space_info->bytes_may_use.
34 *
35 * 3) btrfs_calc*_size. These are the worst case calculations we use based
36 * on the number of items we will want to modify. We have one for changing
37 * items, and one for inserting new items. Generally we use these helpers to
38 * determine the size of the block reserves, and then use the actual bytes
39 * values to adjust the space_info counters.
40 *
41 * MAKING RESERVATIONS, THE NORMAL CASE
42 *
43 * We call into either btrfs_reserve_data_bytes() or
44 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
45 * num_bytes we want to reserve.
46 *
47 * ->reserve
48 * space_info->bytes_may_use += num_bytes
49 *
50 * ->extent allocation
51 * Call btrfs_add_reserved_bytes() which does
52 * space_info->bytes_may_use -= num_bytes
53 * space_info->bytes_reserved += extent_bytes
54 *
55 * ->insert reference
56 * Call btrfs_update_block_group() which does
57 * space_info->bytes_reserved -= extent_bytes
58 * space_info->bytes_used += extent_bytes
59 *
60 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
61 *
62 * Assume we are unable to simply make the reservation because we do not have
63 * enough space
64 *
65 * -> __reserve_bytes
66 * create a reserve_ticket with ->bytes set to our reservation, add it to
67 * the tail of space_info->tickets, kick async flush thread
68 *
69 * ->handle_reserve_ticket
70 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
71 * on the ticket.
72 *
73 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
74 * Flushes various things attempting to free up space.
75 *
76 * -> btrfs_try_granting_tickets()
77 * This is called by anything that either subtracts space from
78 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
79 * space_info->total_bytes. This loops through the ->priority_tickets and
80 * then the ->tickets list checking to see if the reservation can be
81 * completed. If it can the space is added to space_info->bytes_may_use and
82 * the ticket is woken up.
83 *
84 * -> ticket wakeup
85 * Check if ->bytes == 0; if so we got our reservation and we can carry
86 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we
87 * were interrupted).
88 *
89 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
90 *
91 * Same as the above, except we add ourselves to the
92 * space_info->priority_tickets, and we do not use ticket->wait, we simply
93 * call flush_space() ourselves for the states that are safe for us to call
94 * without deadlocking and hope for the best.
95 *
96 * THE FLUSHING STATES
97 *
98 * Generally speaking we will have two cases for each state, a "nice" state
99 * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
100 * reduce the locking overhead on the various trees, and even to keep from
101 * doing any work at all in the case of delayed refs. Each of these delayed
102 * things however hold reservations, and so letting them run allows us to
103 * reclaim space so we can make new reservations.
104 *
105 * FLUSH_DELAYED_ITEMS
106 * Every inode has a delayed item to update the inode. Take a simple write
107 * for example, we would update the inode item at write time to update the
108 * mtime, and then again at finish_ordered_io() time in order to update the
109 * isize or bytes. We keep these delayed items to coalesce these operations
110 * into a single operation done on demand. These are an easy way to reclaim
111 * metadata space.
112 *
113 * FLUSH_DELALLOC
114 * Look at the delalloc comment to get an idea of how much space is reserved
115 * for delayed allocation. We can reclaim some of this space simply by
116 * running delalloc, but usually we need to wait for ordered extents to
117 * reclaim the bulk of this space.
118 *
119 * FLUSH_DELAYED_REFS
120 * We have a block reserve for the outstanding delayed refs space, and every
121 * delayed ref operation holds a reservation. Running these is a quick way
122 * to reclaim space, but we want to hold this until the end because COW can
123 * churn a lot and we can avoid making some extent tree modifications if we
124 * are able to delay for as long as possible.
125 *
126 * ALLOC_CHUNK
127 * We will skip this the first time through space reservation, because of
128 * overcommit and we don't want to have a lot of useless metadata space when
129 * our worst case reservations will likely never come true.
130 *
131 * RUN_DELAYED_IPUTS
132 * If we're freeing inodes we're likely freeing checksums, file extent
133 * items, and extent tree items. Loads of space could be freed up by these
134 * operations, however they won't be usable until the transaction commits.
135 *
136 * COMMIT_TRANS
137 * This will commit the transaction. Historically we had a lot of logic
138 * surrounding whether or not we'd commit the transaction, but this was born
139 * out of a pre-tickets era where we could end up committing the transaction
140 * thousands of times in a row without making progress. Now thanks to our
141 * ticketing system we know if we're not making progress and can error
142 * everybody out after a few commits rather than burning the disk hoping for
143 * a different answer.
144 *
145 * OVERCOMMIT
146 *
147 * Because we hold so many reservations for metadata we will allow you to
148 * reserve more space than is currently free in the currently allocated
149 * metadata space. This only happens with metadata; data does not allow
150 * overcommitting.
151 *
152 * You can see the current logic for when we allow overcommit in
153 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
154 * is no unallocated space to be had, all reservations are kept within the
155 * free space in the allocated metadata chunks.
156 *
157 * Because of overcommitting, you generally want to use the
158 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
159 * thing with or without extra unallocated space.
160 */
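/*
 * A minimal sketch of the normal metadata reservation sequence described
 * above, as seen from a caller.  Illustrative only; the exact signature of
 * btrfs_reserve_metadata_bytes() may differ between kernel versions:
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, bytes,
 *					       BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *
 * On failure nothing was reserved.  On success a later extent allocation
 * moves the bytes from ->bytes_may_use to ->bytes_reserved, and inserting
 * the reference moves them from ->bytes_reserved to ->bytes_used.
 */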
161
162 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
163 bool may_use_included)
164 {
165 ASSERT(s_info);
166 return s_info->bytes_used + s_info->bytes_reserved +
167 s_info->bytes_pinned + s_info->bytes_readonly +
168 s_info->bytes_zone_unusable +
169 (may_use_included ? s_info->bytes_may_use : 0);
170 }
171
172 /*
173 * after adding space to the filesystem, we need to clear the full flags
174 * on all the space infos.
175 */
176 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
177 {
178 struct list_head *head = &info->space_info;
179 struct btrfs_space_info *found;
180
181 list_for_each_entry(found, head, list)
182 found->full = 0;
183 }
184
185 /*
186 * Block groups with more than this value (percents) of unusable space will be
187 * scheduled for background reclaim.
188 */
189 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
190
191 /*
192 * Calculate chunk size depending on volume type (regular or zoned).
193 */
194 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
195 {
196 if (btrfs_is_zoned(fs_info))
197 return fs_info->zone_size;
198
199 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
200
201 if (flags & BTRFS_BLOCK_GROUP_DATA)
202 return BTRFS_MAX_DATA_CHUNK_SIZE;
203 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
204 return SZ_32M;
205
206 /* Handle BTRFS_BLOCK_GROUP_METADATA */
207 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
208 return SZ_1G;
209
210 return SZ_256M;
211 }
212
213 /*
214 * Update default chunk size.
215 */
216 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
217 u64 chunk_size)
218 {
219 WRITE_ONCE(space_info->chunk_size, chunk_size);
220 }
221
222 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
223 {
224
225 struct btrfs_space_info *space_info;
226 int i;
227 int ret;
228
229 space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
230 if (!space_info)
231 return -ENOMEM;
232
233 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
234 INIT_LIST_HEAD(&space_info->block_groups[i]);
235 init_rwsem(&space_info->groups_sem);
236 spin_lock_init(&space_info->lock);
237 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
238 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
239 INIT_LIST_HEAD(&space_info->ro_bgs);
240 INIT_LIST_HEAD(&space_info->tickets);
241 INIT_LIST_HEAD(&space_info->priority_tickets);
242 space_info->clamp = 1;
243 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
244
245 if (btrfs_is_zoned(info))
246 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
247
248 ret = btrfs_sysfs_add_space_info_type(info, space_info);
249 if (ret)
250 return ret;
251
252 list_add(&space_info->list, &info->space_info);
253 if (flags & BTRFS_BLOCK_GROUP_DATA)
254 info->data_sinfo = space_info;
255
256 return ret;
257 }
258
259 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
260 {
261 struct btrfs_super_block *disk_super;
262 u64 features;
263 u64 flags;
264 int mixed = 0;
265 int ret;
266
267 disk_super = fs_info->super_copy;
268 if (!btrfs_super_root(disk_super))
269 return -EINVAL;
270
271 features = btrfs_super_incompat_flags(disk_super);
272 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
273 mixed = 1;
274
275 flags = BTRFS_BLOCK_GROUP_SYSTEM;
276 ret = create_space_info(fs_info, flags);
277 if (ret)
278 goto out;
279
280 if (mixed) {
281 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
282 ret = create_space_info(fs_info, flags);
283 } else {
284 flags = BTRFS_BLOCK_GROUP_METADATA;
285 ret = create_space_info(fs_info, flags);
286 if (ret)
287 goto out;
288
289 flags = BTRFS_BLOCK_GROUP_DATA;
290 ret = create_space_info(fs_info, flags);
291 }
292 out:
293 return ret;
294 }
295
296 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
297 struct btrfs_block_group *block_group)
298 {
299 struct btrfs_space_info *found;
300 int factor, index;
301
302 factor = btrfs_bg_type_to_factor(block_group->flags);
303
304 found = btrfs_find_space_info(info, block_group->flags);
305 ASSERT(found);
306 spin_lock(&found->lock);
307 found->total_bytes += block_group->length;
308 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
309 found->active_total_bytes += block_group->length;
310 found->disk_total += block_group->length * factor;
311 found->bytes_used += block_group->used;
312 found->disk_used += block_group->used * factor;
313 found->bytes_readonly += block_group->bytes_super;
314 found->bytes_zone_unusable += block_group->zone_unusable;
315 if (block_group->length > 0)
316 found->full = 0;
317 btrfs_try_granting_tickets(info, found);
318 spin_unlock(&found->lock);
319
320 block_group->space_info = found;
321
322 index = btrfs_bg_flags_to_raid_index(block_group->flags);
323 down_write(&found->groups_sem);
324 list_add_tail(&block_group->list, &found->block_groups[index]);
325 up_write(&found->groups_sem);
326 }
327
328 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
329 u64 flags)
330 {
331 struct list_head *head = &info->space_info;
332 struct btrfs_space_info *found;
333
334 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
335
336 list_for_each_entry(found, head, list) {
337 if (found->flags & flags)
338 return found;
339 }
340 return NULL;
341 }
342
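/*
 * Estimate how much of the currently unallocated device space could back an
 * overcommitted reservation for this space_info: take the free chunk space,
 * divide by the raid factor of the allocation profile, then scale it down
 * depending on how much flushing @flush allows us to do.
 */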
343 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
344 struct btrfs_space_info *space_info,
345 enum btrfs_reserve_flush_enum flush)
346 {
347 u64 profile;
348 u64 avail;
349 int factor;
350
351 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
352 profile = btrfs_system_alloc_profile(fs_info);
353 else
354 profile = btrfs_metadata_alloc_profile(fs_info);
355
356 avail = atomic64_read(&fs_info->free_chunk_space);
357
358 /*
359 * If we have dup, raid1 or raid10 then only half of the free
360 * space is actually usable. For raid56, the space info used
361 * doesn't include the parity drive, so we don't have to
362 * change the math
363 */
364 factor = btrfs_bg_type_to_factor(profile);
365 avail = div_u64(avail, factor);
366
367 /*
368 * If we aren't flushing all things, let us overcommit up to
369 * half of the space. If we can flush, don't let us overcommit
370 * too much, let it overcommit up to 1/8 of the space.
371 */
372 if (flush == BTRFS_RESERVE_FLUSH_ALL)
373 avail >>= 3;
374 else
375 avail >>= 1;
376 return avail;
377 }
378
379 static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
380 struct btrfs_space_info *space_info)
381 {
382 /*
383 * On regular filesystem, all total_bytes are always writable. On zoned
384 * filesystem, there may be a limitation imposed by max_active_zones.
385 * For metadata allocation, we cannot finish an existing active block
386 * group to avoid a deadlock. Thus, we need to consider only the active
387 * groups to be writable for metadata space.
388 */
389 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
390 return space_info->total_bytes;
391
392 return space_info->active_total_bytes;
393 }
394
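/*
 * Return 1 if a reservation of @bytes may overcommit the allocated space,
 * 0 otherwise.  Data (including mixed block groups) and metadata on zoned
 * filesystems never overcommit; otherwise compare the used space plus @bytes
 * against the writable total plus the estimated unallocated space.
 */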
395 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
396 struct btrfs_space_info *space_info, u64 bytes,
397 enum btrfs_reserve_flush_enum flush)
398 {
399 u64 avail;
400 u64 used;
401
402 /* Don't overcommit when in mixed mode */
403 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
404 return 0;
405
406 used = btrfs_space_info_used(space_info, true);
407 if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
408 avail = 0;
409 else
410 avail = calc_available_free_space(fs_info, space_info, flush);
411
412 if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
413 return 1;
414 return 0;
415 }
416
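/*
 * Delete a ticket from its space_info list and stop accounting its bytes in
 * the space_info's reclaim_size.
 */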
417 static void remove_ticket(struct btrfs_space_info *space_info,
418 struct reserve_ticket *ticket)
419 {
420 if (!list_empty(&ticket->list)) {
421 list_del_init(&ticket->list);
422 ASSERT(space_info->reclaim_size >= ticket->bytes);
423 space_info->reclaim_size -= ticket->bytes;
424 }
425 }
426
427 /*
428 * This is for space we already have accounted in space_info->bytes_may_use, so
429 * basically when we're returning space from block_rsv's.
430 */
431 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
432 struct btrfs_space_info *space_info)
433 {
434 struct list_head *head;
435 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
436
437 lockdep_assert_held(&space_info->lock);
438
439 head = &space_info->priority_tickets;
440 again:
441 while (!list_empty(head)) {
442 struct reserve_ticket *ticket;
443 u64 used = btrfs_space_info_used(space_info, true);
444
445 ticket = list_first_entry(head, struct reserve_ticket, list);
446
447 /* Check and see if our ticket can be satisfied now. */
448 if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
449 btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
450 flush)) {
451 btrfs_space_info_update_bytes_may_use(fs_info,
452 space_info,
453 ticket->bytes);
454 remove_ticket(space_info, ticket);
455 ticket->bytes = 0;
456 space_info->tickets_id++;
457 wake_up(&ticket->wait);
458 } else {
459 break;
460 }
461 }
462
463 if (head == &space_info->priority_tickets) {
464 head = &space_info->tickets;
465 flush = BTRFS_RESERVE_FLUSH_ALL;
466 goto again;
467 }
468 }
469
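/* Dump the size and reserved bytes of one named block reserve under its lock. */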
470 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
471 do { \
472 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
473 spin_lock(&__rsv->lock); \
474 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
475 __rsv->size, __rsv->reserved); \
476 spin_unlock(&__rsv->lock); \
477 } while (0)
478
479 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
480 {
481 switch (space_info->flags) {
482 case BTRFS_BLOCK_GROUP_SYSTEM:
483 return "SYSTEM";
484 case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
485 return "DATA+METADATA";
486 case BTRFS_BLOCK_GROUP_DATA:
487 return "DATA";
488 case BTRFS_BLOCK_GROUP_METADATA:
489 return "METADATA";
490 default:
491 return "UNKNOWN";
492 }
493 }
494
495 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
496 {
497 DUMP_BLOCK_RSV(fs_info, global_block_rsv);
498 DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
499 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
500 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
501 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
502 }
503
504 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
505 struct btrfs_space_info *info)
506 {
507 const char *flag_str = space_info_flag_to_str(info);
508 lockdep_assert_held(&info->lock);
509
510 /* The free space could be negative in case of overcommit */
511 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
512 flag_str,
513 (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
514 info->full ? "" : "not ");
515 btrfs_info(fs_info,
516 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
517 info->total_bytes, info->bytes_used, info->bytes_pinned,
518 info->bytes_reserved, info->bytes_may_use,
519 info->bytes_readonly, info->bytes_zone_unusable);
520 }
521
522 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
523 struct btrfs_space_info *info, u64 bytes,
524 int dump_block_groups)
525 {
526 struct btrfs_block_group *cache;
527 int index = 0;
528
529 spin_lock(&info->lock);
530 __btrfs_dump_space_info(fs_info, info);
531 dump_global_block_rsv(fs_info);
532 spin_unlock(&info->lock);
533
534 if (!dump_block_groups)
535 return;
536
537 down_read(&info->groups_sem);
538 again:
539 list_for_each_entry(cache, &info->block_groups[index], list) {
540 spin_lock(&cache->lock);
541 btrfs_info(fs_info,
542 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
543 cache->start, cache->length, cache->used, cache->pinned,
544 cache->reserved, cache->zone_unusable,
545 cache->ro ? "[readonly]" : "");
546 spin_unlock(&cache->lock);
547 btrfs_dump_free_space(cache, bytes);
548 }
549 if (++index < BTRFS_NR_RAID_TYPES)
550 goto again;
551 up_read(&info->groups_sem);
552 }
553
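/*
 * Convert a byte amount into a number of metadata items to reclaim, based on
 * the worst case size of inserting a single item, with a minimum of 1.
 */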
554 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
555 u64 to_reclaim)
556 {
557 u64 bytes;
558 u64 nr;
559
560 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
561 nr = div64_u64(to_reclaim, bytes);
562 if (!nr)
563 nr = 1;
564 return nr;
565 }
566
567 #define EXTENT_SIZE_PER_ITEM SZ_256K
568
569 /*
570 * shrink metadata reservation for delalloc
571 */
572 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
573 struct btrfs_space_info *space_info,
574 u64 to_reclaim, bool wait_ordered,
575 bool for_preempt)
576 {
577 struct btrfs_trans_handle *trans;
578 u64 delalloc_bytes;
579 u64 ordered_bytes;
580 u64 items;
581 long time_left;
582 int loops;
583
584 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
585 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
586 if (delalloc_bytes == 0 && ordered_bytes == 0)
587 return;
588
589 /* Calc the number of the pages we need flush for space reservation */
590 if (to_reclaim == U64_MAX) {
591 items = U64_MAX;
592 } else {
593 /*
594 * to_reclaim is set to however much metadata we need to
595 * reclaim, but reclaiming that much data doesn't really track
596 * exactly. What we really want to do is reclaim full inodes'
597 * worth of reservations, however that's not available to us
598 * here. We will take a fraction of the delalloc bytes for our
599 * flushing loops and hope for the best. Delalloc will expand
600 * the amount we write to cover an entire dirty extent, which
601 * will reclaim the metadata reservation for that range. If
602 * it's not enough subsequent flush stages will be more
603 * aggressive.
604 */
605 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
606 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
607 }
608
609 trans = current->journal_info;
610
611 /*
612 * If we are doing more ordered than delalloc we need to just wait on
613 * ordered extents, otherwise we'll waste time trying to flush delalloc
614 * that likely won't give us the space back we need.
615 */
616 if (ordered_bytes > delalloc_bytes && !for_preempt)
617 wait_ordered = true;
618
619 loops = 0;
620 while ((delalloc_bytes || ordered_bytes) && loops < 3) {
621 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
622 long nr_pages = min_t(u64, temp, LONG_MAX);
623 int async_pages;
624
625 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
626
627 /*
628 * We need to make sure any outstanding async pages are now
629 * processed before we continue. This is because things like
630 * sync_inode() try to be smart and skip writing if the inode is
631 * marked clean. We don't use filemap_fdatawrite() for flushing
632 * because we want to control how many pages we write out at a
633 * time, thus this is the only safe way to make sure we've
634 * waited for outstanding compressed workers to have started
635 * their jobs and thus have ordered extents set up properly.
636 *
637 * This exists because we do not want to wait for each
638 * individual inode to finish its async work, we simply want to
639 * start the IO on everybody, and then come back here and wait
640 * for all of the async work to catch up. Once we're done with
641 * that we know we'll have ordered extents for everything and we
642 * can decide if we wait for that or not.
643 *
644 * If we choose to replace this in the future, make absolutely
645 * sure that the proper waiting is being done in the async case,
646 * as there have been bugs in that area before.
647 */
648 async_pages = atomic_read(&fs_info->async_delalloc_pages);
649 if (!async_pages)
650 goto skip_async;
651
652 /*
653 * We don't want to wait forever, if we wrote less pages in this
654 * loop than we have outstanding, only wait for that number of
655 * pages, otherwise we can wait for all async pages to finish
656 * before continuing.
657 */
658 if (async_pages > nr_pages)
659 async_pages -= nr_pages;
660 else
661 async_pages = 0;
662 wait_event(fs_info->async_submit_wait,
663 atomic_read(&fs_info->async_delalloc_pages) <=
664 async_pages);
665 skip_async:
666 loops++;
667 if (wait_ordered && !trans) {
668 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
669 } else {
670 time_left = schedule_timeout_killable(1);
671 if (time_left)
672 break;
673 }
674
675 /*
676 * If we are for preemption we just want a one-shot of delalloc
677 * flushing so we can stop flushing if we decide we don't need
678 * to anymore.
679 */
680 if (for_preempt)
681 break;
682
683 spin_lock(&space_info->lock);
684 if (list_empty(&space_info->tickets) &&
685 list_empty(&space_info->priority_tickets)) {
686 spin_unlock(&space_info->lock);
687 break;
688 }
689 spin_unlock(&space_info->lock);
690
691 delalloc_bytes = percpu_counter_sum_positive(
692 &fs_info->delalloc_bytes);
693 ordered_bytes = percpu_counter_sum_positive(
694 &fs_info->ordered_bytes);
695 }
696 }
697
698 /*
699 * Try to flush some data based on policy set by @state. This is only advisory
700 * and may fail for various reasons. The caller is supposed to examine the
701 * state of @space_info to detect the outcome.
702 */
703 static void flush_space(struct btrfs_fs_info *fs_info,
704 struct btrfs_space_info *space_info, u64 num_bytes,
705 enum btrfs_flush_state state, bool for_preempt)
706 {
707 struct btrfs_root *root = fs_info->tree_root;
708 struct btrfs_trans_handle *trans;
709 int nr;
710 int ret = 0;
711
712 switch (state) {
713 case FLUSH_DELAYED_ITEMS_NR:
714 case FLUSH_DELAYED_ITEMS:
715 if (state == FLUSH_DELAYED_ITEMS_NR)
716 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
717 else
718 nr = -1;
719
720 trans = btrfs_join_transaction(root);
721 if (IS_ERR(trans)) {
722 ret = PTR_ERR(trans);
723 break;
724 }
725 ret = btrfs_run_delayed_items_nr(trans, nr);
726 btrfs_end_transaction(trans);
727 break;
728 case FLUSH_DELALLOC:
729 case FLUSH_DELALLOC_WAIT:
730 case FLUSH_DELALLOC_FULL:
731 if (state == FLUSH_DELALLOC_FULL)
732 num_bytes = U64_MAX;
733 shrink_delalloc(fs_info, space_info, num_bytes,
734 state != FLUSH_DELALLOC, for_preempt);
735 break;
736 case FLUSH_DELAYED_REFS_NR:
737 case FLUSH_DELAYED_REFS:
738 trans = btrfs_join_transaction(root);
739 if (IS_ERR(trans)) {
740 ret = PTR_ERR(trans);
741 break;
742 }
743 if (state == FLUSH_DELAYED_REFS_NR)
744 nr = calc_reclaim_items_nr(fs_info, num_bytes);
745 else
746 nr = 0;
747 btrfs_run_delayed_refs(trans, nr);
748 btrfs_end_transaction(trans);
749 break;
750 case ALLOC_CHUNK:
751 case ALLOC_CHUNK_FORCE:
752 /*
753 * For metadata space on a zoned filesystem, reaching here means we
754 * don't have enough space left in active_total_bytes. Try to
755 * activate a block group first, because we may have an inactive
756 * block group already allocated.
757 */
758 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
759 if (ret < 0)
760 break;
761 else if (ret == 1)
762 break;
763
764 trans = btrfs_join_transaction(root);
765 if (IS_ERR(trans)) {
766 ret = PTR_ERR(trans);
767 break;
768 }
769 ret = btrfs_chunk_alloc(trans,
770 btrfs_get_alloc_profile(fs_info, space_info->flags),
771 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
772 CHUNK_ALLOC_FORCE);
773 btrfs_end_transaction(trans);
774
775 /*
776 * For metadata space on a zoned filesystem, allocating a new chunk
777 * is not enough. We still need to activate the block group.
778 * Activate the newly allocated block group by (maybe) finishing
779 * a block group.
780 */
781 if (ret == 1) {
782 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
783 /*
784 * Revert to the original ret regardless of whether we could
785 * finish one block group or not.
786 */
787 if (ret >= 0)
788 ret = 1;
789 }
790
791 if (ret > 0 || ret == -ENOSPC)
792 ret = 0;
793 break;
794 case RUN_DELAYED_IPUTS:
795 /*
796 * If we have pending delayed iputs then we could free up a
797 * bunch of pinned space, so make sure we run the iputs before
798 * we do our pinned bytes check below.
799 */
800 btrfs_run_delayed_iputs(fs_info);
801 btrfs_wait_on_delayed_iputs(fs_info);
802 break;
803 case COMMIT_TRANS:
804 ASSERT(current->journal_info == NULL);
805 trans = btrfs_join_transaction(root);
806 if (IS_ERR(trans)) {
807 ret = PTR_ERR(trans);
808 break;
809 }
810 ret = btrfs_commit_transaction(trans);
811 break;
812 default:
813 ret = -ENOSPC;
814 break;
815 }
816
817 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
818 ret, for_preempt);
819 return;
820 }
821
822 static inline u64
823 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
824 struct btrfs_space_info *space_info)
825 {
826 u64 used;
827 u64 avail;
828 u64 total;
829 u64 to_reclaim = space_info->reclaim_size;
830
831 lockdep_assert_held(&space_info->lock);
832
833 avail = calc_available_free_space(fs_info, space_info,
834 BTRFS_RESERVE_FLUSH_ALL);
835 used = btrfs_space_info_used(space_info, true);
836
837 /*
838 * We may be flushing because suddenly we have less space than we had
839 * before, and now we're well over-committed based on our current free
840 * space. If that's the case add in our overage so we make sure to put
841 * appropriate pressure on the flushing state machine.
842 */
843 total = writable_total_bytes(fs_info, space_info);
844 if (total + avail < used)
845 to_reclaim += used - (total + avail);
846
847 return to_reclaim;
848 }
849
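/*
 * Decide whether the preemptive background flusher should run for this
 * space_info.  Bail if we are close to full, if most of the flushable space
 * belongs to the global reserve, if little is reserved outside of it, or if
 * tickets are already queued; otherwise compare the reclaimable space against
 * the clamped free space estimate described below.
 */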
850 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
851 struct btrfs_space_info *space_info)
852 {
853 u64 global_rsv_size = fs_info->global_block_rsv.reserved;
854 u64 ordered, delalloc;
855 u64 total = writable_total_bytes(fs_info, space_info);
856 u64 thresh;
857 u64 used;
858
859 thresh = div_factor_fine(total, 90);
860
861 lockdep_assert_held(&space_info->lock);
862
863 /* If we're just plain full then async reclaim just slows us down. */
864 if ((space_info->bytes_used + space_info->bytes_reserved +
865 global_rsv_size) >= thresh)
866 return false;
867
868 used = space_info->bytes_may_use + space_info->bytes_pinned;
869
870 /* The total flushable belongs to the global rsv, don't flush. */
871 if (global_rsv_size >= used)
872 return false;
873
874 /*
875 * 128MiB is 1/4 of the maximum global rsv size. If we have less than
876 * that devoted to other reservations then there's no sense in flushing,
877 * we don't have a lot of things that need flushing.
878 */
879 if (used - global_rsv_size <= SZ_128M)
880 return false;
881
882 /*
883 * We have tickets queued, bail so we don't compete with the async
884 * flushers.
885 */
886 if (space_info->reclaim_size)
887 return false;
888
889 /*
890 * If we have over half of the free space occupied by reservations or
891 * pinned then we want to start flushing.
892 *
893 * We do not do the traditional thing here, which is to say
894 *
895 * if (used >= ((total_bytes + avail) / 2))
896 * return 1;
897 *
898 * because this doesn't quite work how we want. If we had more than 50%
899 * of the space_info used by bytes_used and we had 0 available we'd just
900 * constantly run the background flusher. Instead we want it to kick in
901 * if our reclaimable space exceeds our clamped free space.
902 *
903 * Our clamping range is 2^1 -> 2^8. Practically speaking that means
904 * the following:
905 *
906 * Amount of RAM Minimum threshold Maximum threshold
907 *
908 * 256GiB 1GiB 128GiB
909 * 128GiB 512MiB 64GiB
910 * 64GiB 256MiB 32GiB
911 * 32GiB 128MiB 16GiB
912 * 16GiB 64MiB 8GiB
913 *
914 * These are the range our thresholds will fall in, corresponding to how
915 * much delalloc we need for the background flusher to kick in.
916 */
917
918 thresh = calc_available_free_space(fs_info, space_info,
919 BTRFS_RESERVE_FLUSH_ALL);
920 used = space_info->bytes_used + space_info->bytes_reserved +
921 space_info->bytes_readonly + global_rsv_size;
922 if (used < total)
923 thresh += total - used;
924 thresh >>= space_info->clamp;
925
926 used = space_info->bytes_pinned;
927
928 /*
929 * If we have more ordered bytes than delalloc bytes then we're either
930 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
931 * around. Preemptive flushing is only useful in that it can free up
932 * space before tickets need to wait for things to finish. In the case
933 * of ordered extents, preemptively waiting on ordered extents gets us
934 * nothing, if our reservations are tied up in ordered extents we'll
935 * simply have to slow down writers by forcing them to wait on ordered
936 * extents.
937 *
938 * In the case that ordered is larger than delalloc, only include the
939 * block reserves that we would actually be able to directly reclaim
940 * from. In this case if we're heavy on metadata operations this will
941 * clearly be heavy enough to warrant preemptive flushing. In the case
942 * of heavy DIO or ordered reservations, preemptive flushing will just
943 * waste time and cause us to slow down.
944 *
945 * We want to make sure we truly are maxed out on ordered however, so
946 * cut ordered in half, and if it's still higher than delalloc then we
947 * can keep flushing. This is to avoid the case where we start
948 * flushing, and now delalloc == ordered and we stop preemptively
949 * flushing when we could still have several gigs of delalloc to flush.
950 */
951 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
952 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
953 if (ordered >= delalloc)
954 used += fs_info->delayed_refs_rsv.reserved +
955 fs_info->delayed_block_rsv.reserved;
956 else
957 used += space_info->bytes_may_use - global_rsv_size;
958
959 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
960 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
961 }
962
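/*
 * Try to satisfy a ticket directly out of the global block reserve.  Only
 * allowed if the ticket opted in to stealing, the global reserve belongs to
 * this space_info, and taking the bytes still leaves at least 10% of the
 * reserve's size reserved.
 */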
963 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
964 struct btrfs_space_info *space_info,
965 struct reserve_ticket *ticket)
966 {
967 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
968 u64 min_bytes;
969
970 if (!ticket->steal)
971 return false;
972
973 if (global_rsv->space_info != space_info)
974 return false;
975
976 spin_lock(&global_rsv->lock);
977 min_bytes = div_factor(global_rsv->size, 1);
978 if (global_rsv->reserved < min_bytes + ticket->bytes) {
979 spin_unlock(&global_rsv->lock);
980 return false;
981 }
982 global_rsv->reserved -= ticket->bytes;
983 remove_ticket(space_info, ticket);
984 ticket->bytes = 0;
985 wake_up(&ticket->wait);
986 space_info->tickets_id++;
987 if (global_rsv->reserved < global_rsv->size)
988 global_rsv->full = 0;
989 spin_unlock(&global_rsv->lock);
990
991 return true;
992 }
993
994 /*
995 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
996 * @fs_info - fs_info for this fs
997 * @space_info - the space info we were flushing
998 *
999 * We call this when we've exhausted our flushing ability and haven't made
1000 * progress in satisfying tickets. The reservation code handles tickets in
1001 * order, so if there is a large ticket first and then smaller ones we could
1002 * very well satisfy the smaller tickets. This will attempt to wake up any
1003 * tickets in the list to catch this case.
1004 *
1005 * This function returns true if it was able to make progress by clearing out
1006 * other tickets, or if it stumbles across a ticket that was smaller than the
1007 * first ticket.
1008 */
1009 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
1010 struct btrfs_space_info *space_info)
1011 {
1012 struct reserve_ticket *ticket;
1013 u64 tickets_id = space_info->tickets_id;
1014 const bool aborted = BTRFS_FS_ERROR(fs_info);
1015
1016 trace_btrfs_fail_all_tickets(fs_info, space_info);
1017
1018 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1019 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1020 __btrfs_dump_space_info(fs_info, space_info);
1021 }
1022
1023 while (!list_empty(&space_info->tickets) &&
1024 tickets_id == space_info->tickets_id) {
1025 ticket = list_first_entry(&space_info->tickets,
1026 struct reserve_ticket, list);
1027
1028 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1029 return true;
1030
1031 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1032 btrfs_info(fs_info, "failing ticket with %llu bytes",
1033 ticket->bytes);
1034
1035 remove_ticket(space_info, ticket);
1036 if (aborted)
1037 ticket->error = -EIO;
1038 else
1039 ticket->error = -ENOSPC;
1040 wake_up(&ticket->wait);
1041
1042 /*
1043 * We're just throwing tickets away, so more flushing may not
1044 * trip over btrfs_try_granting_tickets, so we need to call it
1045 * here to see if we can make progress with the next ticket in
1046 * the list.
1047 */
1048 if (!aborted)
1049 btrfs_try_granting_tickets(fs_info, space_info);
1050 }
1051 return (tickets_id != space_info->tickets_id);
1052 }
1053
1054 /*
1055 * This is for normal flushers, we can wait all goddamned day if we want to. We
1056 * will loop and continuously try to flush as long as we are making progress.
1057 * We count progress as clearing off tickets each time we have to loop.
1058 */
1059 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1060 {
1061 struct btrfs_fs_info *fs_info;
1062 struct btrfs_space_info *space_info;
1063 u64 to_reclaim;
1064 enum btrfs_flush_state flush_state;
1065 int commit_cycles = 0;
1066 u64 last_tickets_id;
1067
1068 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1069 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1070
1071 spin_lock(&space_info->lock);
1072 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1073 if (!to_reclaim) {
1074 space_info->flush = 0;
1075 spin_unlock(&space_info->lock);
1076 return;
1077 }
1078 last_tickets_id = space_info->tickets_id;
1079 spin_unlock(&space_info->lock);
1080
1081 flush_state = FLUSH_DELAYED_ITEMS_NR;
1082 do {
1083 flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1084 spin_lock(&space_info->lock);
1085 if (list_empty(&space_info->tickets)) {
1086 space_info->flush = 0;
1087 spin_unlock(&space_info->lock);
1088 return;
1089 }
1090 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1091 space_info);
1092 if (last_tickets_id == space_info->tickets_id) {
1093 flush_state++;
1094 } else {
1095 last_tickets_id = space_info->tickets_id;
1096 flush_state = FLUSH_DELAYED_ITEMS_NR;
1097 if (commit_cycles)
1098 commit_cycles--;
1099 }
1100
1101 /*
1102 * We do not want to empty the system of delalloc unless we're
1103 * under heavy pressure, so allow one trip through the flushing
1104 * logic before we start doing a FLUSH_DELALLOC_FULL.
1105 */
1106 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1107 flush_state++;
1108
1109 /*
1110 * We don't want to force a chunk allocation until we've tried
1111 * pretty hard to reclaim space. Think of the case where we
1112 * freed up a bunch of space and so have a lot of pinned space
1113 * to reclaim. We would rather use that than possibly create an
1114 * underutilized metadata chunk. So if this is our first run
1115 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1116 * commit the transaction. If nothing has changed the next go
1117 * around then we can force a chunk allocation.
1118 */
1119 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1120 flush_state++;
1121
1122 if (flush_state > COMMIT_TRANS) {
1123 commit_cycles++;
1124 if (commit_cycles > 2) {
1125 if (maybe_fail_all_tickets(fs_info, space_info)) {
1126 flush_state = FLUSH_DELAYED_ITEMS_NR;
1127 commit_cycles--;
1128 } else {
1129 space_info->flush = 0;
1130 }
1131 } else {
1132 flush_state = FLUSH_DELAYED_ITEMS_NR;
1133 }
1134 }
1135 spin_unlock(&space_info->lock);
1136 } while (flush_state <= COMMIT_TRANS);
1137 }
1138
1139 /*
1140 * This handles pre-flushing of metadata space before we get to the point that
1141 * we need to start blocking threads on tickets. The logic here is different
1142 * from the other flush paths because it doesn't rely on tickets to tell us how
1143 * much we need to flush, instead it attempts to keep us below the 80% full
1144 * watermark of space by flushing whichever reservation pool is currently the
1145 * largest.
1146 */
1147 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1148 {
1149 struct btrfs_fs_info *fs_info;
1150 struct btrfs_space_info *space_info;
1151 struct btrfs_block_rsv *delayed_block_rsv;
1152 struct btrfs_block_rsv *delayed_refs_rsv;
1153 struct btrfs_block_rsv *global_rsv;
1154 struct btrfs_block_rsv *trans_rsv;
1155 int loops = 0;
1156
1157 fs_info = container_of(work, struct btrfs_fs_info,
1158 preempt_reclaim_work);
1159 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1160 delayed_block_rsv = &fs_info->delayed_block_rsv;
1161 delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1162 global_rsv = &fs_info->global_block_rsv;
1163 trans_rsv = &fs_info->trans_block_rsv;
1164
1165 spin_lock(&space_info->lock);
1166 while (need_preemptive_reclaim(fs_info, space_info)) {
1167 enum btrfs_flush_state flush;
1168 u64 delalloc_size = 0;
1169 u64 to_reclaim, block_rsv_size;
1170 u64 global_rsv_size = global_rsv->reserved;
1171
1172 loops++;
1173
1174 /*
1175 * We don't have a precise counter for the metadata being
1176 * reserved for delalloc, so we'll approximate it by subtracting
1177 * out the block rsv's space from the bytes_may_use. If that
1178 * amount is higher than the individual reserves, then we can
1179 * assume it's tied up in delalloc reservations.
1180 */
1181 block_rsv_size = global_rsv_size +
1182 delayed_block_rsv->reserved +
1183 delayed_refs_rsv->reserved +
1184 trans_rsv->reserved;
1185 if (block_rsv_size < space_info->bytes_may_use)
1186 delalloc_size = space_info->bytes_may_use - block_rsv_size;
1187
1188 /*
1189 * We don't want to include the global_rsv in our calculation,
1190 * because that's space we can't touch. Subtract it from the
1191 * block_rsv_size for the next checks.
1192 */
1193 block_rsv_size -= global_rsv_size;
1194
1195 /*
1196 * We really want to avoid flushing delalloc too much, as it
1197 * could result in poor allocation patterns, so only flush it if
1198 * it's larger than the rest of the pools combined.
1199 */
1200 if (delalloc_size > block_rsv_size) {
1201 to_reclaim = delalloc_size;
1202 flush = FLUSH_DELALLOC;
1203 } else if (space_info->bytes_pinned >
1204 (delayed_block_rsv->reserved +
1205 delayed_refs_rsv->reserved)) {
1206 to_reclaim = space_info->bytes_pinned;
1207 flush = COMMIT_TRANS;
1208 } else if (delayed_block_rsv->reserved >
1209 delayed_refs_rsv->reserved) {
1210 to_reclaim = delayed_block_rsv->reserved;
1211 flush = FLUSH_DELAYED_ITEMS_NR;
1212 } else {
1213 to_reclaim = delayed_refs_rsv->reserved;
1214 flush = FLUSH_DELAYED_REFS_NR;
1215 }
1216
1217 spin_unlock(&space_info->lock);
1218
1219 /*
1220 * We don't want to reclaim everything, just a portion, so scale
1221 * down the to_reclaim by 1/4. If it takes us down to 0,
1222 * reclaim 1 item's worth.
1223 */
1224 to_reclaim >>= 2;
1225 if (!to_reclaim)
1226 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1227 flush_space(fs_info, space_info, to_reclaim, flush, true);
1228 cond_resched();
1229 spin_lock(&space_info->lock);
1230 }
1231
1232 /* We only went through once, back off our clamping. */
1233 if (loops == 1 && !space_info->reclaim_size)
1234 space_info->clamp = max(1, space_info->clamp - 1);
1235 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1236 spin_unlock(&space_info->lock);
1237 }
1238
1239 /*
1240 * FLUSH_DELALLOC_WAIT:
1241 * Space is freed from flushing delalloc in one of two ways.
1242 *
1243 * 1) compression is on and we allocate less space than we reserved
1244 * 2) we are overwriting existing space
1245 *
1246 * For #1 that extra space is reclaimed as soon as the delalloc pages are
1247 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1248 * length to ->bytes_reserved, and subtracts the reserved space from
1249 * ->bytes_may_use.
1250 *
1251 * For #2 this is trickier. Once the ordered extent runs we will drop the
1252 * extent in the range we are overwriting, which creates a delayed ref for
1253 * that freed extent. This however is not reclaimed until the transaction
1254 * commits, thus the next stages.
1255 *
1256 * RUN_DELAYED_IPUTS
1257 * If we are freeing inodes, we want to make sure all delayed iputs have
1258 * completed, because they could have been on an inode with i_nlink == 0, and
1259 * thus have been truncated and freed up space. But again this space is not
1260 * immediately re-usable, it comes in the form of a delayed ref, which must be
1261 * run and then the transaction must be committed.
1262 *
1263 * COMMIT_TRANS
1264 * This is where we reclaim all of the pinned space generated by running the
1265 * iputs
1266 *
1267 * ALLOC_CHUNK_FORCE
1268 * For data we start with alloc chunk force, however we could have been full
1269 * before, and then the transaction commit could have freed new block groups,
1270 * so if we now have space to allocate do the force chunk allocation.
1271 */
1272 static const enum btrfs_flush_state data_flush_states[] = {
1273 FLUSH_DELALLOC_FULL,
1274 RUN_DELAYED_IPUTS,
1275 COMMIT_TRANS,
1276 ALLOC_CHUNK_FORCE,
1277 };
1278
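/*
 * Async flusher for data space.  First try to allocate chunks until the
 * space_info is full, then walk the data_flush_states above, failing all
 * remaining tickets if we stop making progress or the filesystem has aborted.
 */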
1279 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1280 {
1281 struct btrfs_fs_info *fs_info;
1282 struct btrfs_space_info *space_info;
1283 u64 last_tickets_id;
1284 enum btrfs_flush_state flush_state = 0;
1285
1286 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1287 space_info = fs_info->data_sinfo;
1288
1289 spin_lock(&space_info->lock);
1290 if (list_empty(&space_info->tickets)) {
1291 space_info->flush = 0;
1292 spin_unlock(&space_info->lock);
1293 return;
1294 }
1295 last_tickets_id = space_info->tickets_id;
1296 spin_unlock(&space_info->lock);
1297
1298 while (!space_info->full) {
1299 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1300 spin_lock(&space_info->lock);
1301 if (list_empty(&space_info->tickets)) {
1302 space_info->flush = 0;
1303 spin_unlock(&space_info->lock);
1304 return;
1305 }
1306
1307 /* Something happened, fail everything and bail. */
1308 if (BTRFS_FS_ERROR(fs_info))
1309 goto aborted_fs;
1310 last_tickets_id = space_info->tickets_id;
1311 spin_unlock(&space_info->lock);
1312 }
1313
1314 while (flush_state < ARRAY_SIZE(data_flush_states)) {
1315 flush_space(fs_info, space_info, U64_MAX,
1316 data_flush_states[flush_state], false);
1317 spin_lock(&space_info->lock);
1318 if (list_empty(&space_info->tickets)) {
1319 space_info->flush = 0;
1320 spin_unlock(&space_info->lock);
1321 return;
1322 }
1323
1324 if (last_tickets_id == space_info->tickets_id) {
1325 flush_state++;
1326 } else {
1327 last_tickets_id = space_info->tickets_id;
1328 flush_state = 0;
1329 }
1330
1331 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1332 if (space_info->full) {
1333 if (maybe_fail_all_tickets(fs_info, space_info))
1334 flush_state = 0;
1335 else
1336 space_info->flush = 0;
1337 } else {
1338 flush_state = 0;
1339 }
1340
1341 /* Something happened, fail everything and bail. */
1342 if (BTRFS_FS_ERROR(fs_info))
1343 goto aborted_fs;
1344
1345 }
1346 spin_unlock(&space_info->lock);
1347 }
1348 return;
1349
1350 aborted_fs:
1351 maybe_fail_all_tickets(fs_info, space_info);
1352 space_info->flush = 0;
1353 spin_unlock(&space_info->lock);
1354 }
1355
1356 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1357 {
1358 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1359 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1360 INIT_WORK(&fs_info->preempt_reclaim_work,
1361 btrfs_preempt_reclaim_metadata_space);
1362 }
1363
1364 static const enum btrfs_flush_state priority_flush_states[] = {
1365 FLUSH_DELAYED_ITEMS_NR,
1366 FLUSH_DELAYED_ITEMS,
1367 ALLOC_CHUNK,
1368 };
1369
1370 static const enum btrfs_flush_state evict_flush_states[] = {
1371 FLUSH_DELAYED_ITEMS_NR,
1372 FLUSH_DELAYED_ITEMS,
1373 FLUSH_DELAYED_REFS_NR,
1374 FLUSH_DELAYED_REFS,
1375 FLUSH_DELALLOC,
1376 FLUSH_DELALLOC_WAIT,
1377 FLUSH_DELALLOC_FULL,
1378 ALLOC_CHUNK,
1379 COMMIT_TRANS,
1380 };
1381
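/*
 * Flush on behalf of a single priority ticket by walking the given list of
 * flush states ourselves instead of waiting on the async flusher.  If that is
 * not enough, try stealing from the global reserve (when the ticket allows
 * it) and otherwise fail the ticket with -ENOSPC.
 */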
1382 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1383 struct btrfs_space_info *space_info,
1384 struct reserve_ticket *ticket,
1385 const enum btrfs_flush_state *states,
1386 int states_nr)
1387 {
1388 u64 to_reclaim;
1389 int flush_state = 0;
1390
1391 spin_lock(&space_info->lock);
1392 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1393 /*
1394 * This is the priority reclaim path, so to_reclaim could be >0 still
1395 * because we may have only satisfied the priority tickets and still
1396 * left non priority tickets on the list. We would then have
1397 * to_reclaim but ->bytes == 0.
1398 */
1399 if (ticket->bytes == 0) {
1400 spin_unlock(&space_info->lock);
1401 return;
1402 }
1403
1404 while (flush_state < states_nr) {
1405 spin_unlock(&space_info->lock);
1406 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1407 false);
1408 flush_state++;
1409 spin_lock(&space_info->lock);
1410 if (ticket->bytes == 0) {
1411 spin_unlock(&space_info->lock);
1412 return;
1413 }
1414 }
1415
1416 /* Attempt to steal from the global rsv if we can. */
1417 if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1418 ticket->error = -ENOSPC;
1419 remove_ticket(space_info, ticket);
1420 }
1421
1422 /*
1423 * We must run try_granting_tickets here because we could be a large
1424 * ticket in front of a smaller ticket that can now be satisfied with
1425 * the available space.
1426 */
1427 btrfs_try_granting_tickets(fs_info, space_info);
1428 spin_unlock(&space_info->lock);
1429 }
1430
1431 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1432 struct btrfs_space_info *space_info,
1433 struct reserve_ticket *ticket)
1434 {
1435 spin_lock(&space_info->lock);
1436
1437 /* We could have been granted before we got here. */
1438 if (ticket->bytes == 0) {
1439 spin_unlock(&space_info->lock);
1440 return;
1441 }
1442
1443 while (!space_info->full) {
1444 spin_unlock(&space_info->lock);
1445 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1446 spin_lock(&space_info->lock);
1447 if (ticket->bytes == 0) {
1448 spin_unlock(&space_info->lock);
1449 return;
1450 }
1451 }
1452
1453 ticket->error = -ENOSPC;
1454 remove_ticket(space_info, ticket);
1455 btrfs_try_granting_tickets(fs_info, space_info);
1456 spin_unlock(&space_info->lock);
1457 }
1458
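/*
 * Sleep until the ticket has been granted (->bytes reaches 0) or an error has
 * been set on it.  If we are killed while waiting, remove the ticket and set
 * -EINTR so that no space gets accounted to us behind our back.
 */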
1459 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1460 struct btrfs_space_info *space_info,
1461 struct reserve_ticket *ticket)
1462
1463 {
1464 DEFINE_WAIT(wait);
1465 int ret = 0;
1466
1467 spin_lock(&space_info->lock);
1468 while (ticket->bytes > 0 && ticket->error == 0) {
1469 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1470 if (ret) {
1471 /*
1472 * Delete us from the list. After we unlock the space
1473 * info, we don't want the async reclaim job to reserve
1474 * space for this ticket. If that would happen, then the
1475 * ticket's task would not know that space was reserved
1476 * despite getting an error, resulting in a space leak
1477 * (bytes_may_use counter of our space_info).
1478 */
1479 remove_ticket(space_info, ticket);
1480 ticket->error = -EINTR;
1481 break;
1482 }
1483 spin_unlock(&space_info->lock);
1484
1485 schedule();
1486
1487 finish_wait(&ticket->wait, &wait);
1488 spin_lock(&space_info->lock);
1489 }
1490 spin_unlock(&space_info->lock);
1491 }
1492
1493 /**
1494 * Do the appropriate flushing and waiting for a ticket
1495 *
1496 * @fs_info: the filesystem
1497 * @space_info: space info for the reservation
1498 * @ticket: ticket for the reservation
1499 * @start_ns: timestamp when the reservation started
1500 * @orig_bytes: amount of bytes originally reserved
1501 * @flush: how much we can flush
1502 *
1503 * This does the work of figuring out how to flush for the ticket, waiting for
1504 * the reservation, and returning the appropriate error if there is one.
1505 */
1506 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1507 struct btrfs_space_info *space_info,
1508 struct reserve_ticket *ticket,
1509 u64 start_ns, u64 orig_bytes,
1510 enum btrfs_reserve_flush_enum flush)
1511 {
1512 int ret;
1513
1514 switch (flush) {
1515 case BTRFS_RESERVE_FLUSH_DATA:
1516 case BTRFS_RESERVE_FLUSH_ALL:
1517 case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1518 wait_reserve_ticket(fs_info, space_info, ticket);
1519 break;
1520 case BTRFS_RESERVE_FLUSH_LIMIT:
1521 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1522 priority_flush_states,
1523 ARRAY_SIZE(priority_flush_states));
1524 break;
1525 case BTRFS_RESERVE_FLUSH_EVICT:
1526 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1527 evict_flush_states,
1528 ARRAY_SIZE(evict_flush_states));
1529 break;
1530 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1531 priority_reclaim_data_space(fs_info, space_info, ticket);
1532 break;
1533 default:
1534 ASSERT(0);
1535 break;
1536 }
1537
1538 ret = ticket->error;
1539 ASSERT(list_empty(&ticket->list));
1540 /*
1541 * Check that we can't have an error set if the reservation succeeded,
1542 * as that would confuse tasks and lead them to error out without
1543 * releasing reserved space (if an error happens the expectation is that
1544 * space wasn't reserved at all).
1545 */
1546 ASSERT(!(ticket->bytes == 0 && ticket->error));
1547 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1548 start_ns, flush, ticket->error);
1549 return ret;
1550 }
1551
1552 /*
1553 * This returns true if this flush state will go through the ordinary flushing
1554 * code.
1555 */
1556 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1557 {
1558 return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1559 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1560 }
1561
1562 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1563 struct btrfs_space_info *space_info)
1564 {
1565 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1566 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1567
1568 /*
1569 * If we're heavy on ordered operations then clamping won't help us. We
1570 * need to clamp specifically to keep up with dirtying buffered
1571 * writers, because there's not a 1:1 correlation of writing delalloc
1572 * and freeing space, like there is with flushing delayed refs or
1573 * delayed nodes. If we're already more ordered than delalloc then
1574 * we're keeping up, otherwise we aren't and should probably clamp.
1575 */
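/*
* Note: a larger ->clamp shrinks the threshold computed by
* need_preemptive_reclaim(), so preemptive background flushing kicks in
* earlier; 8 is simply a cap on how aggressive we let that get.
*/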
1576 if (ordered < delalloc)
1577 space_info->clamp = min(space_info->clamp + 1, 8);
1578 }
1579
1580 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1581 {
1582 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1583 flush == BTRFS_RESERVE_FLUSH_EVICT);
1584 }
1585
1586 /**
1587 * __reserve_bytes - try to reserve bytes from the given space_info
1588 *
1589 * @fs_info: the filesystem
1590 * @space_info: space info we want to allocate from
1591 * @orig_bytes: number of bytes we want
1592 * @flush: how we are allowed to flush to make our reservation
1593 *
1594 * This will reserve orig_bytes number of bytes from the given space_info.
1595 * If there is not enough space it will make an attempt to flush out space
1596 * to make room, for example by flushing delalloc or by committing the
1597 * current transaction. If @flush is BTRFS_RESERVE_NO_FLUSH then no attempts
1598 * to regain reservations will be made and this will fail if there is not
1599 * enough space already.
1600 */
1601 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1602 struct btrfs_space_info *space_info, u64 orig_bytes,
1603 enum btrfs_reserve_flush_enum flush)
1604 {
1605 struct work_struct *async_work;
1606 struct reserve_ticket ticket;
1607 u64 start_ns = 0;
1608 u64 used;
1609 int ret = 0;
1610 bool pending_tickets;
1611
1612 ASSERT(orig_bytes);
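/*
* A task already attached to a running transaction (current->journal_info
* set) must not do a FLUSH_ALL reservation, as flushing could end up
* committing the very transaction it is attached to and deadlock.
*/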
1613 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
1614
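/*
* Data and metadata reservations are flushed by separate async reclaim
* workers, pick the one that matches this reservation.
*/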
1615 if (flush == BTRFS_RESERVE_FLUSH_DATA)
1616 async_work = &fs_info->async_data_reclaim_work;
1617 else
1618 async_work = &fs_info->async_reclaim_work;
1619
1620 spin_lock(&space_info->lock);
1621 ret = -ENOSPC;
1622 used = btrfs_space_info_used(space_info, true);
1623
1624 /*
1625 * We don't want NO_FLUSH allocations to jump ahead of everybody; they can
1626 * generally handle ENOSPC in a different way, so treat them the same as
1627 * normal flushers when it comes to skipping pending tickets.
1628 */
1629 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1630 pending_tickets = !list_empty(&space_info->tickets) ||
1631 !list_empty(&space_info->priority_tickets);
1632 else
1633 pending_tickets = !list_empty(&space_info->priority_tickets);
1634
1635 /*
1636 * Carry on if we have enough space (short-circuit) OR call
1637 * can_overcommit() to ensure we can overcommit to continue.
1638 */
1639 if (!pending_tickets &&
1640 ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
1641 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1642 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1643 orig_bytes);
1644 ret = 0;
1645 }
1646
1647 /*
1648 * If we couldn't make a reservation then setup our reservation ticket
1649 * and kick the async worker if it's not already running.
1650 *
1651 * If we are a priority flusher then we just need to add our ticket to
1652 * the list and we will do our own flushing further down.
1653 */
1654 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
1655 ticket.bytes = orig_bytes;
1656 ticket.error = 0;
1657 space_info->reclaim_size += ticket.bytes;
1658 init_waitqueue_head(&ticket.wait);
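/*
* A "steal" ticket allows the flusher, as a last resort, to satisfy us out
* of the global block reserve instead of failing the ticket outright.
*/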
1659 ticket.steal = can_steal(flush);
1660 if (trace_btrfs_reserve_ticket_enabled())
1661 start_ns = ktime_get_ns();
1662
1663 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1664 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1665 flush == BTRFS_RESERVE_FLUSH_DATA) {
1666 list_add_tail(&ticket.list, &space_info->tickets);
1667 if (!space_info->flush) {
1668 /*
1669 * We were forced to add a reserve ticket, so
1670 * our preemptive flushing is unable to keep
1671 * up. Clamp down on the threshold for the
1672 * preemptive flushing in order to keep up with
1673 * the workload.
1674 */
1675 maybe_clamp_preempt(fs_info, space_info);
1676
1677 space_info->flush = 1;
1678 trace_btrfs_trigger_flush(fs_info,
1679 space_info->flags,
1680 orig_bytes, flush,
1681 "enospc");
1682 queue_work(system_unbound_wq, async_work);
1683 }
1684 } else {
1685 list_add_tail(&ticket.list,
1686 &space_info->priority_tickets);
1687 }
1688 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1689 /*
1690 * We will do the space reservation dance during log replay,
1691 * which means we won't have fs_info->fs_root set, so don't do
1692 * the async reclaim as we will panic.
1693 */
1694 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1695 !work_busy(&fs_info->preempt_reclaim_work) &&
1696 need_preemptive_reclaim(fs_info, space_info)) {
1697 trace_btrfs_trigger_flush(fs_info, space_info->flags,
1698 orig_bytes, flush, "preempt");
1699 queue_work(system_unbound_wq,
1700 &fs_info->preempt_reclaim_work);
1701 }
1702 }
1703 spin_unlock(&space_info->lock);
1704 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
1705 return ret;
1706
1707 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1708 orig_bytes, flush);
1709 }
1710
1711 /**
1712 * btrfs_reserve_metadata_bytes - try to reserve metadata bytes from the block_rsv's space
1713 *
1714 * @fs_info: the filesystem
1715 * @block_rsv: block_rsv we're allocating for
1716 * @orig_bytes: number of bytes we want
1717 * @flush: how we are allowed to flush to make our reservation
1718 *
1719 * This will reserve orig_bytes number of bytes from the space info associated
1720 * with the block_rsv. If there is not enough space it will make an attempt to
1721 * flush out space to make room. It will do this by flushing delalloc if
1722 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
1723 * then no attempts to regain reservations will be made and this will fail if
1724 * there is not enough space already.
1725 */
1726 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1727 struct btrfs_block_rsv *block_rsv,
1728 u64 orig_bytes,
1729 enum btrfs_reserve_flush_enum flush)
1730 {
1731 int ret;
1732
1733 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1734 if (ret == -ENOSPC) {
1735 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1736 block_rsv->space_info->flags,
1737 orig_bytes, 1);
1738
1739 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1740 btrfs_dump_space_info(fs_info, block_rsv->space_info,
1741 orig_bytes, 0);
1742 }
1743 return ret;
1744 }
1745
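/*
* A minimal sketch of the expected caller pattern (illustrative only, see
* helpers such as btrfs_block_rsv_add() in block-rsv.c):
*
*	ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
*	if (!ret)
*		btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
*
* A successful reservation only bumps space_info->bytes_may_use; the caller
* is responsible for tracking the bytes in its block_rsv and for eventually
* releasing them (e.g. via btrfs_block_rsv_release()).
*/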
1746 /**
1747 * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
1748 *
1749 * @fs_info: the filesystem
1750 * @bytes: number of bytes we need
1751 * @flush: how we are allowed to flush
1752 *
1753 * This will reserve bytes from the data space info. If there is not enough
1754 * space then we will attempt to flush space as specified by flush.
1755 */
1756 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1757 enum btrfs_reserve_flush_enum flush)
1758 {
1759 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1760 int ret;
1761
1762 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1763 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1764 flush == BTRFS_RESERVE_NO_FLUSH);
1765 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1766
1767 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1768 if (ret == -ENOSPC) {
1769 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1770 data_sinfo->flags, bytes, 1);
1771 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1772 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1773 }
1774 return ret;
1775 }
1776
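/*
* Data reservations made here are expected to be undone by the caller if no
* allocation ends up using them, e.g. via
* btrfs_free_reserved_data_space_noquota() (a sketch of the usual pairing in
* delalloc-space.c, not something enforced in this file).
*/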
1777 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1778 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1779 {
1780 struct btrfs_space_info *space_info;
1781
1782 btrfs_info(fs_info, "dumping space info:");
1783 list_for_each_entry(space_info, &fs_info->space_info, list) {
1784 spin_lock(&space_info->lock);
1785 __btrfs_dump_space_info(fs_info, space_info);
1786 spin_unlock(&space_info->lock);
1787 }
1788 dump_global_block_rsv(fs_info);
1789 }
1790