Lines Matching refs: fs_info

154 	struct btrfs_fs_info	*fs_info;  member
212 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
242 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
252 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
253 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
275 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) in __scrub_blocked_if_needed() argument
277 while (atomic_read(&fs_info->scrub_pause_req)) { in __scrub_blocked_if_needed()
278 mutex_unlock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
279 wait_event(fs_info->scrub_pause_wait, in __scrub_blocked_if_needed()
280 atomic_read(&fs_info->scrub_pause_req) == 0); in __scrub_blocked_if_needed()
281 mutex_lock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
285 static void scrub_pause_on(struct btrfs_fs_info *fs_info) in scrub_pause_on() argument
287 atomic_inc(&fs_info->scrubs_paused); in scrub_pause_on()
288 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_on()
291 static void scrub_pause_off(struct btrfs_fs_info *fs_info) in scrub_pause_off() argument
293 mutex_lock(&fs_info->scrub_lock); in scrub_pause_off()
294 __scrub_blocked_if_needed(fs_info); in scrub_pause_off()
295 atomic_dec(&fs_info->scrubs_paused); in scrub_pause_off()
296 mutex_unlock(&fs_info->scrub_lock); in scrub_pause_off()
298 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_off()
301 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) in scrub_blocked_if_needed() argument
303 scrub_pause_on(fs_info); in scrub_blocked_if_needed()
304 scrub_pause_off(fs_info); in scrub_blocked_if_needed()
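
Taken together, lines 275-304 of this cross-reference (which appears to index the fs_info uses in the btrfs scrub code) show essentially every statement of the pause helpers, so the protocol can be pieced back together. A minimal reconstruction, assuming the lines elided from the listing contain nothing but braces and blank lines:

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	/* Caller holds fs_info->scrub_lock; drop it while a pause is pending. */
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	/* Announce that this scrub thread has reached a pause point. */
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	/* Block for as long as btrfs_scrub_pause() keeps the request raised. */
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	/* Convenience wrapper: yield to a pending pause request, then resume. */
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

The detail worth noting is that __scrub_blocked_if_needed() drops scrub_lock around the wait, so a paused scrub never sits on the mutex that the pause and cancel paths further down also need.
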
419 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, in lock_full_stripe() argument
429 bg_cache = btrfs_lookup_block_group(fs_info, bytenr); in lock_full_stripe()
466 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, in unlock_full_stripe() argument
480 bg_cache = btrfs_lookup_block_group(fs_info, bytenr); in unlock_full_stripe()
503 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", in unlock_full_stripe()
576 struct btrfs_fs_info *fs_info = dev->fs_info; in scrub_setup_ctx() local
585 sctx->fs_info = dev->fs_info; in scrub_setup_ctx()
609 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); in scrub_setup_ctx()
620 WARN_ON(!fs_info->dev_replace.tgtdev); in scrub_setup_ctx()
622 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
644 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; in scrub_print_warning_inode() local
653 local_root = btrfs_read_fs_root_no_name(fs_info, &root_key); in scrub_print_warning_inode()
702 btrfs_warn_in_rcu(fs_info, in scrub_print_warning_inode()
715 btrfs_warn_in_rcu(fs_info, in scrub_print_warning_inode()
729 struct btrfs_fs_info *fs_info; in scrub_print_warning() local
745 fs_info = sblock->sctx->fs_info; in scrub_print_warning()
756 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, in scrub_print_warning()
773 btrfs_warn_in_rcu(fs_info, in scrub_print_warning()
787 iterate_extent_inodes(fs_info, found_key.objectid, in scrub_print_warning()
801 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, in scrub_put_recover() argument
805 btrfs_bio_counter_dec(fs_info); in scrub_put_recover()
823 struct btrfs_fs_info *fs_info; in scrub_handle_errored_block() local
839 fs_info = sctx->fs_info; in scrub_handle_errored_block()
866 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); in scrub_handle_errored_block()
932 scrub_recheck_block(fs_info, sblock_bad, 1); in scrub_handle_errored_block()
1034 scrub_recheck_block(fs_info, sblock_other, 0); in scrub_handle_errored_block()
1128 &fs_info->dev_replace.num_write_errors); in scrub_handle_errored_block()
1153 scrub_recheck_block(fs_info, sblock_bad, 1); in scrub_handle_errored_block()
1166 btrfs_err_rl_in_rcu(fs_info, in scrub_handle_errored_block()
1175 btrfs_err_rl_in_rcu(fs_info, in scrub_handle_errored_block()
1194 scrub_put_recover(fs_info, recover); in scrub_handle_errored_block()
1204 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); in scrub_handle_errored_block()
1254 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_setup_recheck_block() local
1286 btrfs_bio_counter_inc_blocked(fs_info); in scrub_setup_recheck_block()
1287 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, in scrub_setup_recheck_block()
1291 btrfs_bio_counter_dec(fs_info); in scrub_setup_recheck_block()
1298 btrfs_bio_counter_dec(fs_info); in scrub_setup_recheck_block()
1324 scrub_put_recover(fs_info, recover); in scrub_setup_recheck_block()
1366 scrub_put_recover(fs_info, recover); in scrub_setup_recheck_block()
1380 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, in scrub_submit_raid56_bio_wait() argument
1393 ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, in scrub_submit_raid56_bio_wait()
1403 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, in scrub_recheck_block_on_raid56() argument
1425 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { in scrub_recheck_block_on_raid56()
1449 static void scrub_recheck_block(struct btrfs_fs_info *fs_info, in scrub_recheck_block() argument
1459 return scrub_recheck_block_on_raid56(fs_info, sblock); in scrub_recheck_block()
1538 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; in scrub_repair_page_from_good_copy() local
1548 btrfs_warn_rl(fs_info, in scrub_repair_page_from_good_copy()
1568 &fs_info->dev_replace.num_write_errors); in scrub_repair_page_from_good_copy()
1580 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_write_block_to_dev_replace() local
1596 &fs_info->dev_replace.num_write_errors); in scrub_write_block_to_dev_replace()
1704 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; in scrub_wr_bio_end_io() local
1711 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); in scrub_wr_bio_end_io()
1723 &sbio->sctx->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1795 len = sctx->fs_info->sectorsize; in scrub_checksum_data()
1823 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_tree_block() local
1856 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, in scrub_checksum_tree_block()
1860 len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; in scrub_checksum_tree_block()
2086 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_missing_raid56_end_io() local
2093 btrfs_queue_work(fs_info->scrub_workers, &sblock->work); in scrub_missing_raid56_end_io()
2100 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_worker() local
2114 btrfs_err_rl_in_rcu(fs_info, in scrub_missing_raid56_worker()
2121 btrfs_err_rl_in_rcu(fs_info, in scrub_missing_raid56_worker()
2142 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_pages() local
2151 btrfs_bio_counter_inc_blocked(fs_info); in scrub_missing_raid56_pages()
2152 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, in scrub_missing_raid56_pages()
2173 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); in scrub_missing_raid56_pages()
2193 btrfs_bio_counter_dec(fs_info); in scrub_missing_raid56_pages()
2293 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; in scrub_bio_end_io() local
2298 btrfs_queue_work(fs_info->scrub_workers, &sbio->work); in scrub_bio_end_io()
2350 int sectorsize = sparity->sctx->fs_info->sectorsize; in __scrub_mark_bitmap()
2436 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); in scrub_find_csum()
2439 num_sectors = sum->len / sctx->fs_info->sectorsize; in scrub_find_csum()
2462 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2471 blocksize = sctx->fs_info->nodesize; in scrub_extent()
2477 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2608 blocksize = sctx->fs_info->sectorsize; in scrub_extent_for_parity()
2714 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; in scrub_parity_bio_endio() local
2724 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); in scrub_parity_bio_endio()
2730 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_parity_check_and_repair() local
2743 btrfs_bio_counter_inc_blocked(fs_info); in scrub_parity_check_and_repair()
2744 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, in scrub_parity_check_and_repair()
2754 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, in scrub_parity_check_and_repair()
2768 btrfs_bio_counter_dec(fs_info); in scrub_parity_check_and_repair()
2804 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity() local
2805 struct btrfs_root *root = fs_info->extent_root; in scrub_raid56_parity()
2806 struct btrfs_root *csum_root = fs_info->csum_root; in scrub_raid56_parity()
2826 nsectors = div_u64(map->stripe_len, fs_info->sectorsize); in scrub_raid56_parity()
2850 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) in scrub_raid56_parity()
2897 bytes = fs_info->nodesize; in scrub_raid56_parity()
2921 btrfs_err(fs_info, in scrub_raid56_parity()
2948 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, in scrub_raid56_parity()
3029 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe() local
3030 struct btrfs_root *root = fs_info->extent_root; in scrub_stripe()
3031 struct btrfs_root *csum_root = fs_info->csum_root; in scrub_stripe()
3123 scrub_blocked_if_needed(fs_info); in scrub_stripe()
3162 if (atomic_read(&fs_info->scrub_cancel_req) || in scrub_stripe()
3170 if (atomic_read(&fs_info->scrub_pause_req)) { in scrub_stripe()
3180 scrub_blocked_if_needed(fs_info); in scrub_stripe()
3201 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) in scrub_stripe()
3250 bytes = fs_info->nodesize; in scrub_stripe()
3273 btrfs_err(fs_info, in scrub_stripe()
3303 scrub_remap_extent(fs_info, extent_logical, in scrub_stripe()
3403 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk() local
3404 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; in scrub_chunk()
3457 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks() local
3458 struct btrfs_root *root = fs_info->dev_root; in scrub_enumerate_chunks()
3468 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in scrub_enumerate_chunks()
3530 cache = btrfs_lookup_block_group(fs_info, chunk_offset); in scrub_enumerate_chunks()
3545 scrub_pause_on(fs_info); in scrub_enumerate_chunks()
3568 ret = btrfs_wait_ordered_roots(fs_info, U64_MAX, in scrub_enumerate_chunks()
3580 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3586 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3600 btrfs_warn(fs_info, in scrub_enumerate_chunks()
3606 btrfs_dev_replace_write_lock(&fs_info->dev_replace); in scrub_enumerate_chunks()
3610 btrfs_dev_replace_write_unlock(&fs_info->dev_replace); in scrub_enumerate_chunks()
3633 scrub_pause_on(fs_info); in scrub_enumerate_chunks()
3644 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3646 btrfs_dev_replace_write_lock(&fs_info->dev_replace); in scrub_enumerate_chunks()
3649 btrfs_dev_replace_write_unlock(&fs_info->dev_replace); in scrub_enumerate_chunks()
3699 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers() local
3701 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in scrub_supers()
3705 if (scrub_dev->fs_devices != fs_info->fs_devices) in scrub_supers()
3708 gen = fs_info->last_trans_committed; in scrub_supers()
3730 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, in scrub_workers_get() argument
3734 int max_active = fs_info->thread_pool_size; in scrub_workers_get()
3736 if (fs_info->scrub_workers_refcnt == 0) { in scrub_workers_get()
3737 fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", in scrub_workers_get()
3739 if (!fs_info->scrub_workers) in scrub_workers_get()
3742 fs_info->scrub_wr_completion_workers = in scrub_workers_get()
3743 btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, in scrub_workers_get()
3745 if (!fs_info->scrub_wr_completion_workers) in scrub_workers_get()
3748 fs_info->scrub_parity_workers = in scrub_workers_get()
3749 btrfs_alloc_workqueue(fs_info, "scrubparity", flags, in scrub_workers_get()
3751 if (!fs_info->scrub_parity_workers) in scrub_workers_get()
3754 ++fs_info->scrub_workers_refcnt; in scrub_workers_get()
3758 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); in scrub_workers_get()
3760 btrfs_destroy_workqueue(fs_info->scrub_workers); in scrub_workers_get()
3765 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) in scrub_workers_put() argument
3767 if (--fs_info->scrub_workers_refcnt == 0) { in scrub_workers_put()
3768 btrfs_destroy_workqueue(fs_info->scrub_workers); in scrub_workers_put()
3769 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); in scrub_workers_put()
3770 btrfs_destroy_workqueue(fs_info->scrub_parity_workers); in scrub_workers_put()
3772 WARN_ON(fs_info->scrub_workers_refcnt < 0); in scrub_workers_put()
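
Lines 3730-3772 outline the refcounted worker pools: the first scrub on a filesystem allocates the "scrub", "scrubwrc" and "scrubparity" workqueues, concurrent scrubs only bump scrub_workers_refcnt, and the last scrub_workers_put() tears all three down. A rough reconstruction of the two helpers follows; the workqueue flags, the queue-depth arguments and the error-label names are not in the listing and are assumed here, and btrfs_scrub_dev() appears to call both helpers under fs_info->scrub_lock (lines 3840-3911):

static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;	/* assumed */
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

The unwind order on failure matches the fragments at 3758/3760: whatever was already allocated is destroyed in reverse before returning.
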
3775 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, in btrfs_scrub_dev() argument
3783 if (btrfs_fs_closing(fs_info)) in btrfs_scrub_dev()
3786 if (fs_info->nodesize > BTRFS_STRIPE_LEN) { in btrfs_scrub_dev()
3792 btrfs_err(fs_info, in btrfs_scrub_dev()
3794 fs_info->nodesize, in btrfs_scrub_dev()
3799 if (fs_info->sectorsize != PAGE_SIZE) { in btrfs_scrub_dev()
3801 btrfs_err_rl(fs_info, in btrfs_scrub_dev()
3803 fs_info->sectorsize, PAGE_SIZE); in btrfs_scrub_dev()
3807 if (fs_info->nodesize > in btrfs_scrub_dev()
3809 fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { in btrfs_scrub_dev()
3814 btrfs_err(fs_info, in btrfs_scrub_dev()
3816 fs_info->nodesize, in btrfs_scrub_dev()
3818 fs_info->sectorsize, in btrfs_scrub_dev()
3824 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3825 dev = btrfs_find_device(fs_info, devid, NULL, NULL); in btrfs_scrub_dev()
3828 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3834 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3835 btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", in btrfs_scrub_dev()
3840 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3843 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3844 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3848 btrfs_dev_replace_read_lock(&fs_info->dev_replace); in btrfs_scrub_dev()
3851 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { in btrfs_scrub_dev()
3852 btrfs_dev_replace_read_unlock(&fs_info->dev_replace); in btrfs_scrub_dev()
3853 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3854 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3857 btrfs_dev_replace_read_unlock(&fs_info->dev_replace); in btrfs_scrub_dev()
3859 ret = scrub_workers_get(fs_info, is_dev_replace); in btrfs_scrub_dev()
3861 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3862 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3868 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3869 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3870 scrub_workers_put(fs_info); in btrfs_scrub_dev()
3875 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3881 __scrub_blocked_if_needed(fs_info); in btrfs_scrub_dev()
3882 atomic_inc(&fs_info->scrubs_running); in btrfs_scrub_dev()
3883 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3890 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3892 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3900 atomic_dec(&fs_info->scrubs_running); in btrfs_scrub_dev()
3901 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_dev()
3908 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3910 scrub_workers_put(fs_info); in btrfs_scrub_dev()
3911 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3918 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) in btrfs_scrub_pause() argument
3920 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3921 atomic_inc(&fs_info->scrub_pause_req); in btrfs_scrub_pause()
3922 while (atomic_read(&fs_info->scrubs_paused) != in btrfs_scrub_pause()
3923 atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_pause()
3924 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3925 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_pause()
3926 atomic_read(&fs_info->scrubs_paused) == in btrfs_scrub_pause()
3927 atomic_read(&fs_info->scrubs_running)); in btrfs_scrub_pause()
3928 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3930 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3933 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) in btrfs_scrub_continue() argument
3935 atomic_dec(&fs_info->scrub_pause_req); in btrfs_scrub_continue()
3936 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_continue()
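
The pause/continue pair at lines 3918-3936 is visible almost in full: btrfs_scrub_pause() raises scrub_pause_req and then waits until every running scrub has checked in via scrub_pause_on(), while btrfs_scrub_continue() drops the request and wakes the waiters. Reconstructed under the same assumption that the elided lines are only braces:

void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	/* Wait until every running scrub has parked itself as paused. */
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
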
3939 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) in btrfs_scrub_cancel() argument
3941 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3942 if (!atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3943 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3947 atomic_inc(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3948 while (atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3949 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3950 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel()
3951 atomic_read(&fs_info->scrubs_running) == 0); in btrfs_scrub_cancel()
3952 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3954 atomic_dec(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3955 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3960 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info, in btrfs_scrub_cancel_dev() argument
3965 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3968 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3973 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3974 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel_dev()
3976 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3978 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3983 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, in btrfs_scrub_progress() argument
3989 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3990 dev = btrfs_find_device(fs_info, devid, NULL, NULL); in btrfs_scrub_progress()
3995 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
4000 static void scrub_remap_extent(struct btrfs_fs_info *fs_info, in scrub_remap_extent() argument
4011 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, in scrub_remap_extent()