Lines Matching full:ordered
131 * look for the first ordered struct that has this offset, otherwise
159 * The tree is given a single reference on the ordered extent that was
182 * The ordered extent has reserved qgroup space, release now in __btrfs_add_ordered_extent()
227 "inconsistency in ordered tree at offset %llu", in __btrfs_add_ordered_extent()
285 * when an ordered extent is finished. If the list covers more than one
286 * ordered extent, it is split across multiple ordered extents.
301 * of the file. The IO may span ordered extents. If
306 * to make sure this function only returns 1 once for a given ordered extent.
349 "bad ordered accounting left %llu size %llu", in btrfs_dec_test_first_ordered_pending()
374 * of the file. The IO should not span ordered extents. If
379 * to make sure this function only returns 1 once for a given ordered extent.
412 "bad ordered accounting left %llu size %llu", in btrfs_dec_test_ordered_pending()
436 * used to drop a reference on an ordered extent. This will free
463 * remove an ordered extent from the tree. No references are dropped
543 struct btrfs_ordered_extent *ordered; in btrfs_run_ordered_extent_work() local
545 ordered = container_of(work, struct btrfs_ordered_extent, flush_work); in btrfs_run_ordered_extent_work()
546 btrfs_start_ordered_extent(ordered, 1); in btrfs_run_ordered_extent_work()
547 complete(&ordered->completion); in btrfs_run_ordered_extent_work()
551 * wait for all the ordered extents in a root. This is done when balancing
561 struct btrfs_ordered_extent *ordered, *next; in btrfs_wait_ordered_extents() local
569 ordered = list_first_entry(&splice, struct btrfs_ordered_extent, in btrfs_wait_ordered_extents()
572 if (range_end <= ordered->disk_bytenr || in btrfs_wait_ordered_extents()
573 ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) { in btrfs_wait_ordered_extents()
574 list_move_tail(&ordered->root_extent_list, &skipped); in btrfs_wait_ordered_extents()
579 list_move_tail(&ordered->root_extent_list, in btrfs_wait_ordered_extents()
581 refcount_inc(&ordered->refs); in btrfs_wait_ordered_extents()
584 btrfs_init_work(&ordered->flush_work, in btrfs_wait_ordered_extents()
586 list_add_tail(&ordered->work_list, &works); in btrfs_wait_ordered_extents()
587 btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work); in btrfs_wait_ordered_extents()
599 list_for_each_entry_safe(ordered, next, &works, work_list) { in btrfs_wait_ordered_extents()
600 list_del_init(&ordered->work_list); in btrfs_wait_ordered_extents()
601 wait_for_completion(&ordered->completion); in btrfs_wait_ordered_extents()
602 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_extents()
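
Lines 543-602 show the fan-out pattern behind btrfs_wait_ordered_extents(): take a reference on each ordered extent, queue a work item whose handler waits the extent out and signals a completion (btrfs_run_ordered_extent_work() above), then wait for every completion and drop the references. A generic sketch of the same pattern using the plain kernel workqueue API instead of btrfs's private btrfs_init_work()/btrfs_queue_work() helpers; struct flush_item and flush_fn are illustrative names:

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/list.h>

struct flush_item {
	struct work_struct work;
	struct completion done;
	struct list_head list;
};

static void flush_fn(struct work_struct *work)
{
	struct flush_item *item = container_of(work, struct flush_item, work);

	/* ... start and wait for the IO this item represents ... */
	complete(&item->done);
}

static void flush_all(struct list_head *items)
{
	struct flush_item *item, *next;
	LIST_HEAD(works);

	/* Fan out: queue one work item per object, keeping them on a local list. */
	list_for_each_entry_safe(item, next, items, list) {
		init_completion(&item->done);
		INIT_WORK(&item->work, flush_fn);
		list_move_tail(&item->list, &works);
		queue_work(system_unbound_wq, &item->work);
	}

	/* Fan in: wait for every queued item to finish. */
	list_for_each_entry_safe(item, next, &works, list) {
		list_del_init(&item->list);
		wait_for_completion(&item->done);
	}
}

The private works list lets the second loop wait on exactly the items that were queued; in the btrfs version this is also how each refcount_inc() is later paired with a btrfs_put_ordered_extent().
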
646 * Used to start IO or wait for a given ordered extent to finish.
674 * Used to wait on ordered extents across a large range of bytes.
682 struct btrfs_ordered_extent *ordered; in btrfs_wait_ordered_range() local
701 * for any ordered extents that haven't completed yet. This is to make in btrfs_wait_ordered_range()
703 * before the ordered extents complete - to avoid failures (-EEXIST) in btrfs_wait_ordered_range()
704 * when adding the new ordered extents to the ordered tree. in btrfs_wait_ordered_range()
710 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end); in btrfs_wait_ordered_range()
711 if (!ordered) in btrfs_wait_ordered_range()
713 if (ordered->file_offset > orig_end) { in btrfs_wait_ordered_range()
714 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_range()
717 if (ordered->file_offset + ordered->num_bytes <= start) { in btrfs_wait_ordered_range()
718 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_range()
721 btrfs_start_ordered_extent(ordered, 1); in btrfs_wait_ordered_range()
722 end = ordered->file_offset; in btrfs_wait_ordered_range()
724 * If the ordered extent had an error save the error but don't in btrfs_wait_ordered_range()
725 * exit without waiting first for all other ordered extents in in btrfs_wait_ordered_range()
728 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) in btrfs_wait_ordered_range()
730 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_range()
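
Lines 674-730 come from btrfs_wait_ordered_range(): starting at the end of the range, repeatedly look up the first ordered extent at or before the current position, wait it out, remember any IO error, and step backwards until the range is drained. A sketch reassembled from those fragments; the loop-advance step at the bottom is filled in from the upstream function rather than the fragments, and the page-flushing done before this loop is omitted:

/* Assumes kernel context plus the fs/btrfs internal headers. */
static int wait_ordered_range_sketch(struct inode *inode, u64 start, u64 orig_end)
{
	u64 end = orig_end;
	int ret = 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;

		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		/* Extent starts after the range: nothing left to wait for. */
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		/* Extent ends before the range: nothing left to wait for. */
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		/* Start the IO if needed and wait for the extent to finish. */
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/* Remember an IO error, but keep draining the whole range. */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}
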
739 * find an ordered extent corresponding to file_offset. Return NULL if
765 /* Since the DIO code tries to lock a wide area, we need to look for any ordered
806 * Adds all ordered extents to the given list. The list ends up sorted by the
807 * file_offset of the ordered extents.
819 struct btrfs_ordered_extent *ordered; in btrfs_get_ordered_extents_for_logging() local
821 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); in btrfs_get_ordered_extents_for_logging()
823 if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) in btrfs_get_ordered_extents_for_logging()
826 ASSERT(list_empty(&ordered->log_list)); in btrfs_get_ordered_extents_for_logging()
827 list_add_tail(&ordered->log_list, list); in btrfs_get_ordered_extents_for_logging()
828 refcount_inc(&ordered->refs); in btrfs_get_ordered_extents_for_logging()
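
Lines 806-828 are from btrfs_get_ordered_extents_for_logging(): walking the ordered tree in rb-node order yields the extents sorted by file_offset; already-logged extents are skipped and each extent added to the list gets an extra reference. A sketch of that traversal, assuming @root is the rb_root of the inode's ordered tree and omitting the tree locking:

/* Assumes kernel context plus the fs/btrfs internal headers. */
static void collect_ordered_for_logging(struct rb_root *root, struct list_head *list)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		/* Already captured by a previous log attempt: skip it. */
		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		/* The list now owns a reference, dropped when logging is done. */
		refcount_inc(&ordered->refs);
	}
}
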
858 * search the ordered extents for one corresponding to 'offset' and
867 struct btrfs_ordered_extent *ordered; in btrfs_find_ordered_sum() local
876 ordered = btrfs_lookup_ordered_extent(inode, offset); in btrfs_find_ordered_sum()
877 if (!ordered) in btrfs_find_ordered_sum()
881 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { in btrfs_find_ordered_sum()
898 btrfs_put_ordered_extent(ordered); in btrfs_find_ordered_sum()
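
Lines 858-898 are from btrfs_find_ordered_sum(): look up the ordered extent covering the offset, walk its list of btrfs_ordered_sum items newest-first, and drop the reference the lookup took. A sketch of that lookup/use/put discipline, assuming the struct btrfs_inode based call shown above and eliding the checksum matching itself:

/* Assumes kernel context plus the fs/btrfs internal headers. */
static int find_ordered_sum_sketch(struct btrfs_inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_sum *ordered_sum;
	int ret = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	/* Walk the checksum list newest-first, as the fragment above does. */
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		/* ... check whether this sum item covers @offset ... */
	}

	/* Every successful lookup holds a reference; always pair it with a put. */
	btrfs_put_ordered_extent(ordered);
	return ret;
}
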
904 * ordered extents in it are run to completion.
906 * @inode: Inode whose ordered tree is to be searched
919 struct btrfs_ordered_extent *ordered; in btrfs_lock_and_flush_ordered_range() local
928 ordered = btrfs_lookup_ordered_range(inode, start, in btrfs_lock_and_flush_ordered_range()
930 if (!ordered) { in btrfs_lock_and_flush_ordered_range()
941 btrfs_start_ordered_extent(ordered, 1); in btrfs_lock_and_flush_ordered_range()
942 btrfs_put_ordered_extent(ordered); in btrfs_lock_and_flush_ordered_range()
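
Lines 904-942 are from btrfs_lock_and_flush_ordered_range(): lock the extent range, look for any ordered extent overlapping it, and if one is found drop the lock, wait the extent out and retry until the locked range is clean. A sketch of that retry loop; the extent-lock calls are left as comments, and the length argument passed to btrfs_lookup_ordered_range() is an assumption, since the fragment above truncates it:

/* Assumes kernel context plus the fs/btrfs internal headers. */
static void lock_and_flush_sketch(struct btrfs_inode *inode, u64 start, u64 end)
{
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* ... lock the extent range [start, end] here ... */

		/* end - start + 1 is assumed; the fragment elides the length. */
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/* No ordered extent overlaps: return with the range locked. */
			return;
		}

		/* ... unlock the extent range before waiting, then retry ... */
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

Dropping the lock before waiting matters here: the ordered extent can only finish once its writeback completes, which may itself need the extent range, so waiting while holding the lock could deadlock.
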