// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

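/*
 * Create the slab cache used to allocate struct btrfs_delayed_node.
 * Called once at module init; btrfs_delayed_inode_exit() below tears it
 * down again.
 */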
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

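/*
 * Grab the delayed node cached in the btrfs inode and take an extra
 * reference on it. The fast path reads inode->delayed_node locklessly; if
 * that is empty we fall back to a radix tree lookup under root->inode_lock.
 * Returns NULL if the inode has no delayed node, or if the node we found is
 * already being torn down.
 */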
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

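/*
 * Drop one reference on @delayed_node. While the node still holds items it
 * is (re)queued on the delayed root's lists, otherwise it is dequeued; once
 * the last reference is gone the node is removed from the radix tree and
 * freed.
 */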
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

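/*
 * Link @ins into the insertion or deletion rb-tree of @delayed_node,
 * ordered by key. Returns -EEXIST if an item with the same key is already
 * queued. For dir index insertions this also pushes index_cnt forward so
 * new index numbers never collide with a pending item.
 */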
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

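/*
 * Account one finished delayed item and wake up any throttled waiters,
 * either when the backlog drops below BTRFS_DELAYED_BACKGROUND or once per
 * BTRFS_DELAYED_BATCH completions, so waiters in
 * btrfs_balance_delayed_items() make steady progress.
 */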
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

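/*
 * Reserve metadata space for one delayed item by migrating it out of the
 * transaction's block reservation into the global delayed_block_rsv, so the
 * item can be written back later, outside this transaction's context.
 */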
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root,
				fs_info->nodesize, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

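/*
 * Delete a run of contiguous dir index items that all live in the leaf the
 * path currently points to. The batch stops at the first delayed item whose
 * key is not consecutive or which falls outside this leaf.
 */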
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

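/*
 * Write the in-memory copy of the inode item back into the fs tree and, if
 * BTRFS_DELAYED_NODE_DEL_IREF is set, delete the single INODE_REF/EXTREF
 * item as well. Expects delayed_node->mutex to be held by the caller.
 */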
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

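/*
 * Worker callback that flushes prepared delayed nodes in the background.
 * It keeps going until the backlog drops below BTRFS_DELAYED_BACKGROUND / 2
 * or the requested number of nodes has been written back.
 */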
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

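/*
 * Throttle callers that keep creating delayed items. Below
 * BTRFS_DELAYED_BACKGROUND items nothing happens; above it a background
 * flush of BTRFS_DELAYED_BATCH nodes is kicked off, and once the backlog
 * reaches BTRFS_DELAYED_WRITEBACK the caller also waits until enough items
 * have been written back.
 */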
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

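/*
 * If a dir index insertion is still pending for @key, cancel it in memory
 * instead of queueing a deletion item: the insertion never reached the
 * btree, so the insert and the delete simply annihilate each other.
 * Returns 0 if a pending insertion was cancelled, 1 otherwise.
 */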
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so a metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

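/*
 * Copy the current VFS inode state into the stack btrfs_inode_item held in
 * the delayed node, so it can be written to the fs tree later without
 * touching the in-core inode again.
 */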
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

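/*
 * Drop every pending insertion and deletion item of @delayed_node along
 * with their metadata reservations, including the delayed iref and dirty
 * inode item if present. Used when an inode is being evicted or the
 * filesystem is being torn down.
 */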
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			refcount_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}