Lines matching +full:data +full:- +full:mirror in fs/btrfs/scrub.c

1 // SPDX-License-Identifier: GPL-2.0
13 #include "disk-io.h"
14 #include "ordered-data.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
20 #include "rcu-string.h"
22 #include "block-group.h"
26 * This is only the first step towards a full-featured scrub. It reads all
28 * is found or the extent cannot be read, good data will be written back if
32 * - In case an unrepairable extent is encountered, track which files are
34 * - track and record media errors, throw out bad devices
35 * - add a mode to also read unallocated space
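Everything in this listing is kernel-side; the scrub itself is normally kicked off from user space through the btrfs scrub ioctl. As a rough, hedged illustration of how this code gets exercised, here is a minimal user-space sketch that starts a read-only scrub of one device. It assumes <linux/btrfs.h> provides BTRFS_IOC_SCRUB, BTRFS_SCRUB_READONLY and struct btrfs_ioctl_scrub_args as in current kernels; the device id of 1 is only a placeholder.

    /* Minimal sketch: start a read-only scrub of devid 1 on a mounted btrfs.
     * Assumes <linux/btrfs.h> provides BTRFS_IOC_SCRUB and the args struct. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/btrfs.h>

    int main(int argc, char **argv)
    {
        struct btrfs_ioctl_scrub_args args;
        int fd;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <btrfs-mountpoint>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(&args, 0, sizeof(args));
        args.devid = 1;                        /* placeholder device id */
        args.start = 0;
        args.end = (__u64)-1;                  /* whole device */
        args.flags = BTRFS_SCRUB_READONLY;     /* verify only, do not repair */

        /* Blocks until the scrub finishes (or fails). */
        if (ioctl(fd, BTRFS_IOC_SCRUB, &args) < 0)
            perror("BTRFS_IOC_SCRUB");
        else
            printf("csum errors: %llu, verify errors: %llu\n",
                   (unsigned long long)args.progress.csum_errors,
                   (unsigned long long)args.progress.verify_errors);
        close(fd);
        return 0;
    }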
121 /* The following is for the data used to check parity */
122 /* It is only for data that has a checksum */
149 /* Mark the parity blocks which have data */
153 * Mark the parity blocks which have data, but errors happened when
154 * reading or checking that data
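The two per-parity-stripe bitmaps above (dbitmap: sectors that carry data; ebitmap: sectors whose read or check failed) later decide whether a parity rebuild is still worthwhile: scrub_parity_check_and_repair() further down clears the errored bits out of dbitmap and skips the rebuild if nothing survives. A self-contained sketch of that decision, with each bitmap reduced to a single unsigned long (the kernel's bitmap_andnot() becomes a plain bit operation here):

    /* Sketch: decide whether a parity rebuild is still worthwhile.
     * dbitmap marks sectors that have data, ebitmap marks sectors whose
     * data could not be read or failed its checksum. Only sectors that
     * have data AND no error can feed a parity rebuild. */
    #include <stdio.h>

    static int parity_rebuild_possible(unsigned long *dbitmap,
                                       unsigned long ebitmap, int nsectors)
    {
        unsigned long mask = (nsectors >= (int)(8 * sizeof(unsigned long)))
                                ? ~0UL : (1UL << nsectors) - 1;

        *dbitmap &= ~ebitmap & mask;   /* drop errored sectors */
        return *dbitmap != 0;          /* anything left to rebuild from? */
    }

    int main(void)
    {
        unsigned long dbitmap = 0x0fUL; /* sectors 0-3 carry data */
        unsigned long ebitmap = 0x05UL; /* sectors 0 and 2 are errored */

        if (parity_rebuild_possible(&dbitmap, ebitmap, 16))
            printf("rebuild parity from sectors 0x%lx\n", dbitmap);
        else
            printf("nothing left to rebuild parity from\n");
        return 0;
    }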
192 * Use a ref counter to avoid use-after-free issues. Scrub workers
234 return -ENOMEM; in attach_scrub_page_private()
235 spp->logical = logical; in attach_scrub_page_private()
266 refcount_set(&sblock->refs, 1); in alloc_scrub_block()
267 sblock->sctx = sctx; in alloc_scrub_block()
268 sblock->logical = logical; in alloc_scrub_block()
269 sblock->physical = physical; in alloc_scrub_block()
270 sblock->physical_for_dev_replace = physical_for_dev_replace; in alloc_scrub_block()
271 sblock->dev = dev; in alloc_scrub_block()
272 sblock->mirror_num = mirror_num; in alloc_scrub_block()
273 sblock->no_io_error_seen = 1; in alloc_scrub_block()
289 const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT; in alloc_scrub_sector()
293 ASSERT(logical - sblock->logical < U32_MAX); in alloc_scrub_sector()
300 if (!sblock->pages[page_index]) { in alloc_scrub_sector()
303 sblock->pages[page_index] = alloc_page(gfp); in alloc_scrub_sector()
304 if (!sblock->pages[page_index]) { in alloc_scrub_sector()
308 ret = attach_scrub_page_private(sblock->pages[page_index], in alloc_scrub_sector()
309 sblock->logical + (page_index << PAGE_SHIFT)); in alloc_scrub_sector()
312 __free_page(sblock->pages[page_index]); in alloc_scrub_sector()
313 sblock->pages[page_index] = NULL; in alloc_scrub_sector()
318 atomic_set(&ssector->refs, 1); in alloc_scrub_sector()
319 ssector->sblock = sblock; in alloc_scrub_sector()
321 ASSERT(sblock->sectors[sblock->sector_count] == NULL); in alloc_scrub_sector()
322 ssector->offset = logical - sblock->logical; in alloc_scrub_sector()
325 ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK); in alloc_scrub_sector()
327 sblock->sectors[sblock->sector_count] = ssector; in alloc_scrub_sector()
328 sblock->sector_count++; in alloc_scrub_sector()
329 sblock->len += sblock->sctx->fs_info->sectorsize; in alloc_scrub_sector()
336 struct scrub_block *sblock = ssector->sblock; in scrub_sector_get_page()
345 ASSERT(ssector->offset < sblock->len); in scrub_sector_get_page()
347 index = ssector->offset >> PAGE_SHIFT; in scrub_sector_get_page()
349 ASSERT(sblock->pages[index]); in scrub_sector_get_page()
350 ASSERT(PagePrivate(sblock->pages[index])); in scrub_sector_get_page()
351 return sblock->pages[index]; in scrub_sector_get_page()
356 struct scrub_block *sblock = ssector->sblock; in scrub_sector_get_page_offset()
365 ASSERT(ssector->offset < sblock->len); in scrub_sector_get_page_offset()
367 return offset_in_page(ssector->offset); in scrub_sector_get_page_offset()
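alloc_scrub_sector() and scrub_sector_get_page() locate a sector inside the pages attached to its scrub_block purely by arithmetic on the sector's byte offset: the page index is offset >> PAGE_SHIFT and the in-page offset is offset_in_page(offset). A stand-alone illustration using the subpage case (4 KiB sectors on 64 KiB pages) as an example; the sizes are illustrative only:

    /* Sketch: map a sector's byte offset inside a scrub_block to a
     * (page index, offset in page) pair, as the scrub helpers do. */
    #include <stdio.h>

    #define EX_PAGE_SHIFT   16                  /* example: 64 KiB pages */
    #define EX_PAGE_SIZE    (1UL << EX_PAGE_SHIFT)
    #define EX_SECTORSIZE   4096UL              /* example: 4 KiB sectors */

    int main(void)
    {
        for (unsigned long i = 0; i < 20; i++) {
            unsigned long offset = i * EX_SECTORSIZE;   /* offset in block */
            unsigned long page_index = offset >> EX_PAGE_SHIFT;
            unsigned long in_page = offset & (EX_PAGE_SIZE - 1);

            printf("sector %2lu -> page %lu, offset %5lu\n",
                   i, page_index, in_page);
        }
        return 0;
    }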
426 return sector->recover && in scrub_is_page_on_raid56()
427 (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); in scrub_is_page_on_raid56()
432 refcount_inc(&sctx->refs); in scrub_pending_bio_inc()
433 atomic_inc(&sctx->bios_in_flight); in scrub_pending_bio_inc()
438 atomic_dec(&sctx->bios_in_flight); in scrub_pending_bio_dec()
439 wake_up(&sctx->list_wait); in scrub_pending_bio_dec()
445 while (atomic_read(&fs_info->scrub_pause_req)) { in __scrub_blocked_if_needed()
446 mutex_unlock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
447 wait_event(fs_info->scrub_pause_wait, in __scrub_blocked_if_needed()
448 atomic_read(&fs_info->scrub_pause_req) == 0); in __scrub_blocked_if_needed()
449 mutex_lock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
455 atomic_inc(&fs_info->scrubs_paused); in scrub_pause_on()
456 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_on()
461 mutex_lock(&fs_info->scrub_lock); in scrub_pause_off()
463 atomic_dec(&fs_info->scrubs_paused); in scrub_pause_off()
464 mutex_unlock(&fs_info->scrub_lock); in scrub_pause_off()
466 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_off()
480 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
482 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
494 lockdep_assert_held(&locks_root->lock); in insert_full_stripe_lock()
496 p = &locks_root->root.rb_node; in insert_full_stripe_lock()
500 if (fstripe_logical < entry->logical) { in insert_full_stripe_lock()
501 p = &(*p)->rb_left; in insert_full_stripe_lock()
502 } else if (fstripe_logical > entry->logical) { in insert_full_stripe_lock()
503 p = &(*p)->rb_right; in insert_full_stripe_lock()
505 entry->refs++; in insert_full_stripe_lock()
515 return ERR_PTR(-ENOMEM); in insert_full_stripe_lock()
516 ret->logical = fstripe_logical; in insert_full_stripe_lock()
517 ret->refs = 1; in insert_full_stripe_lock()
518 mutex_init(&ret->mutex); in insert_full_stripe_lock()
520 rb_link_node(&ret->node, parent, p); in insert_full_stripe_lock()
521 rb_insert_color(&ret->node, &locks_root->root); in insert_full_stripe_lock()
538 lockdep_assert_held(&locks_root->lock); in search_full_stripe_lock()
540 node = locks_root->root.rb_node; in search_full_stripe_lock()
543 if (fstripe_logical < entry->logical) in search_full_stripe_lock()
544 node = node->rb_left; in search_full_stripe_lock()
545 else if (fstripe_logical > entry->logical) in search_full_stripe_lock()
546 node = node->rb_right; in search_full_stripe_lock()
566 WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); in get_full_stripe_logical()
572 ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) * in get_full_stripe_logical()
573 cache->full_stripe_len + cache->start; in get_full_stripe_logical()
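get_full_stripe_logical() rounds a bytenr down to the start of its full stripe, and does so relative to the block group start because the block group itself need not be aligned to full_stripe_len. The same arithmetic as a stand-alone sketch with made-up numbers:

    /* Sketch: round a logical address down to the start of its full stripe,
     * relative to the owning block group, mirroring get_full_stripe_logical(). */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t full_stripe_start(uint64_t bg_start, uint64_t full_stripe_len,
                                      uint64_t bytenr)
    {
        return (bytenr - bg_start) / full_stripe_len * full_stripe_len + bg_start;
    }

    int main(void)
    {
        /* Example: block group at 1 GiB, 2 data stripes * 64 KiB = 128 KiB
         * full stripes (numbers are illustrative only). */
        uint64_t bg_start = 1ULL << 30;
        uint64_t full_stripe_len = 128 * 1024;
        uint64_t bytenr = bg_start + 300 * 1024;   /* somewhere in stripe #2 */
        uint64_t start = full_stripe_start(bg_start, full_stripe_len, bytenr);

        printf("full stripe starts at %llu (offset %llu in block group)\n",
               (unsigned long long)start,
               (unsigned long long)(start - bg_start));
        return 0;
    }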
601 return -ENOENT; in lock_full_stripe()
605 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) in lock_full_stripe()
607 locks_root = &bg_cache->full_stripe_locks_root; in lock_full_stripe()
612 mutex_lock(&locks_root->lock); in lock_full_stripe()
614 mutex_unlock(&locks_root->lock); in lock_full_stripe()
619 mutex_lock(&existing->mutex); in lock_full_stripe()
652 return -ENOENT; in unlock_full_stripe()
654 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) in unlock_full_stripe()
657 locks_root = &bg_cache->full_stripe_locks_root; in unlock_full_stripe()
660 mutex_lock(&locks_root->lock); in unlock_full_stripe()
665 ret = -ENOENT; in unlock_full_stripe()
666 mutex_unlock(&locks_root->lock); in unlock_full_stripe()
670 if (fstripe_lock->refs == 0) { in unlock_full_stripe()
673 fstripe_lock->logical); in unlock_full_stripe()
675 fstripe_lock->refs--; in unlock_full_stripe()
678 if (fstripe_lock->refs == 0) { in unlock_full_stripe()
679 rb_erase(&fstripe_lock->node, &locks_root->root); in unlock_full_stripe()
682 mutex_unlock(&locks_root->lock); in unlock_full_stripe()
684 mutex_unlock(&fstripe_lock->mutex); in unlock_full_stripe()
694 while (!list_empty(&sctx->csum_list)) { in scrub_free_csums()
696 sum = list_first_entry(&sctx->csum_list, in scrub_free_csums()
698 list_del(&sum->list); in scrub_free_csums()
711 if (sctx->curr != -1) { in scrub_free_ctx()
712 struct scrub_bio *sbio = sctx->bios[sctx->curr]; in scrub_free_ctx()
714 for (i = 0; i < sbio->sector_count; i++) in scrub_free_ctx()
715 scrub_block_put(sbio->sectors[i]->sblock); in scrub_free_ctx()
716 bio_put(sbio->bio); in scrub_free_ctx()
720 struct scrub_bio *sbio = sctx->bios[i]; in scrub_free_ctx()
727 kfree(sctx->wr_curr_bio); in scrub_free_ctx()
734 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
747 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
748 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
749 sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO; in scrub_setup_ctx()
750 sctx->curr = -1; in scrub_setup_ctx()
751 sctx->fs_info = fs_info; in scrub_setup_ctx()
752 INIT_LIST_HEAD(&sctx->csum_list); in scrub_setup_ctx()
759 sctx->bios[i] = sbio; in scrub_setup_ctx()
761 sbio->index = i; in scrub_setup_ctx()
762 sbio->sctx = sctx; in scrub_setup_ctx()
763 sbio->sector_count = 0; in scrub_setup_ctx()
764 INIT_WORK(&sbio->work, scrub_bio_end_io_worker); in scrub_setup_ctx()
766 if (i != SCRUB_BIOS_PER_SCTX - 1) in scrub_setup_ctx()
767 sctx->bios[i]->next_free = i + 1; in scrub_setup_ctx()
769 sctx->bios[i]->next_free = -1; in scrub_setup_ctx()
771 sctx->first_free = 0; in scrub_setup_ctx()
772 atomic_set(&sctx->bios_in_flight, 0); in scrub_setup_ctx()
773 atomic_set(&sctx->workers_pending, 0); in scrub_setup_ctx()
774 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
776 spin_lock_init(&sctx->list_lock); in scrub_setup_ctx()
777 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
778 init_waitqueue_head(&sctx->list_wait); in scrub_setup_ctx()
779 sctx->throttle_deadline = 0; in scrub_setup_ctx()
781 WARN_ON(sctx->wr_curr_bio != NULL); in scrub_setup_ctx()
782 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
783 sctx->wr_curr_bio = NULL; in scrub_setup_ctx()
785 WARN_ON(!fs_info->dev_replace.tgtdev); in scrub_setup_ctx()
786 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
787 sctx->flush_all_writes = false; in scrub_setup_ctx()
794 return ERR_PTR(-ENOMEM); in scrub_setup_ctx()
807 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; in scrub_print_warning_inode()
825 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); in scrub_print_warning_inode()
828 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
832 eb = swarn->path->nodes[0]; in scrub_print_warning_inode()
833 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], in scrub_print_warning_inode()
836 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
844 ipath = init_ipath(4096, local_root, swarn->path); in scrub_print_warning_inode()
861 for (i = 0; i < ipath->fspath->elem_cnt; ++i) in scrub_print_warning_inode()
864 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
865 rcu_str_deref(swarn->dev->name), in scrub_print_warning_inode()
866 swarn->physical, in scrub_print_warning_inode()
868 fs_info->sectorsize, nlink, in scrub_print_warning_inode()
869 (char *)(unsigned long)ipath->fspath->val[i]); in scrub_print_warning_inode()
878 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
879 rcu_str_deref(swarn->dev->name), in scrub_print_warning_inode()
880 swarn->physical, in scrub_print_warning_inode()
904 WARN_ON(sblock->sector_count < 1); in scrub_print_warning()
905 dev = sblock->dev; in scrub_print_warning()
906 fs_info = sblock->sctx->fs_info; in scrub_print_warning()
909 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { in scrub_print_warning()
911 errstr, rcu_str_deref(dev->name), in scrub_print_warning()
912 sblock->physical); in scrub_print_warning()
919 swarn.physical = sblock->physical; in scrub_print_warning()
920 swarn.logical = sblock->logical; in scrub_print_warning()
929 extent_item_pos = swarn.logical - found_key.objectid; in scrub_print_warning()
932 eb = path->nodes[0]; in scrub_print_warning()
933 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in scrub_print_warning()
934 item_size = btrfs_item_size(eb, path->slots[0]); in scrub_print_warning()
944 rcu_str_deref(dev->name), in scrub_print_warning()
947 ret < 0 ? -1 : ref_level, in scrub_print_warning()
948 ret < 0 ? -1 : ref_root); in scrub_print_warning()
966 refcount_inc(&recover->refs); in scrub_get_recover()
972 if (refcount_dec_and_test(&recover->refs)) { in scrub_put_recover()
974 btrfs_put_bioc(recover->bioc); in scrub_put_recover()
989 struct scrub_ctx *sctx = sblock_to_check->sctx; in scrub_handle_errored_block()
990 struct btrfs_device *dev = sblock_to_check->dev; in scrub_handle_errored_block()
996 /* One scrub_block for each mirror */ in scrub_handle_errored_block()
1008 BUG_ON(sblock_to_check->sector_count < 1); in scrub_handle_errored_block()
1009 fs_info = sctx->fs_info; in scrub_handle_errored_block()
1010 if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { in scrub_handle_errored_block()
1017 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1018 ++sctx->stat.super_errors; in scrub_handle_errored_block()
1019 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1023 logical = sblock_to_check->logical; in scrub_handle_errored_block()
1024 ASSERT(sblock_to_check->mirror_num); in scrub_handle_errored_block()
1025 failed_mirror_index = sblock_to_check->mirror_num - 1; in scrub_handle_errored_block()
1026 is_metadata = !(sblock_to_check->sectors[0]->flags & in scrub_handle_errored_block()
1028 have_csum = sblock_to_check->sectors[0]->have_csum; in scrub_handle_errored_block()
1030 if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical)) in scrub_handle_errored_block()
1045 * For data corruption, Parity and Data threads will both try in scrub_handle_errored_block()
1046 * to recover the data. in scrub_handle_errored_block()
1053 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1054 if (ret == -ENOMEM) in scrub_handle_errored_block()
1055 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
1056 sctx->stat.read_errors++; in scrub_handle_errored_block()
1057 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1058 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1064 * re-read the extent or metadata block that failed (that was in scrub_handle_errored_block()
1069 * mirror contains I/O errors, but the errors do not in scrub_handle_errored_block()
1070 * overlap, i.e. the data can be repaired by selecting the in scrub_handle_errored_block()
1073 * would be that mirror #1 has an I/O error on the first sector, in scrub_handle_errored_block()
1074 * the second sector is good, and mirror #2 has an I/O error on in scrub_handle_errored_block()
1076 * Then the first sector of the first mirror can be repaired by in scrub_handle_errored_block()
1077 * taking the first sector of the second mirror, and the in scrub_handle_errored_block()
1078 * second sector of the second mirror can be repaired by in scrub_handle_errored_block()
1079 * copying the contents of the 2nd sector of the 1st mirror. in scrub_handle_errored_block()
1080 * One more note: if the sectors of one mirror contain I/O in scrub_handle_errored_block()
1082 * the best data for repairing, the first attempt is to find in scrub_handle_errored_block()
1083 * a mirror without I/O errors and with a validated checksum. in scrub_handle_errored_block()
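The strategy the comment describes, repairing a bad block sector by sector from whichever mirror read each sector cleanly, boils down to a per-sector loop over the mirrors. A self-contained sketch of just that selection step (the real code below additionally re-verifies checksums and has special handling for dev-replace and RAID56):

    /* Sketch: repair a block sector-by-sector by borrowing each sector from
     * the first mirror that read it without an I/O error. */
    #include <stdio.h>

    #define NSECTORS    4
    #define NMIRRORS    2

    struct mirror {
        int io_error[NSECTORS];        /* 1 = read of this sector failed */
        char data[NSECTORS + 1];       /* stand-in for sector contents */
    };

    int main(void)
    {
        struct mirror m[NMIRRORS] = {
            { .io_error = { 1, 0, 0, 0 }, .data = "?BCD" },  /* mirror 0 */
            { .io_error = { 0, 0, 1, 0 }, .data = "A??D" },  /* mirror 1 */
        };
        char repaired[NSECTORS + 1] = { 0 };
        int s, i, unrepairable = 0;

        for (s = 0; s < NSECTORS; s++) {
            for (i = 0; i < NMIRRORS; i++) {
                if (!m[i].io_error[s]) {
                    repaired[s] = m[i].data[s];
                    break;
                }
            }
            if (i == NMIRRORS) {       /* every mirror failed this sector */
                repaired[s] = '!';
                unrepairable = 1;
            }
        }
        printf("repaired block: %s%s\n", repaired,
               unrepairable ? " (some sectors unrepairable)" : "");
        return 0;
    }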
1104 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1105 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
1106 sctx->stat.read_errors++; in scrub_handle_errored_block()
1107 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1108 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1117 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1118 sctx->stat.read_errors++; in scrub_handle_errored_block()
1119 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1120 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1127 /* build and submit the bios for the failed mirror, check checksums */ in scrub_handle_errored_block()
1130 if (!sblock_bad->header_error && !sblock_bad->checksum_error && in scrub_handle_errored_block()
1131 sblock_bad->no_io_error_seen) { in scrub_handle_errored_block()
1140 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1141 sctx->stat.unverified_errors++; in scrub_handle_errored_block()
1142 sblock_to_check->data_corrected = 1; in scrub_handle_errored_block()
1143 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1145 if (sctx->is_dev_replace) in scrub_handle_errored_block()
1150 if (!sblock_bad->no_io_error_seen) { in scrub_handle_errored_block()
1151 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1152 sctx->stat.read_errors++; in scrub_handle_errored_block()
1153 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1157 } else if (sblock_bad->checksum_error) { in scrub_handle_errored_block()
1158 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1159 sctx->stat.csum_errors++; in scrub_handle_errored_block()
1160 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1165 } else if (sblock_bad->header_error) { in scrub_handle_errored_block()
1166 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1167 sctx->stat.verify_errors++; in scrub_handle_errored_block()
1168 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1172 if (sblock_bad->generation_error) in scrub_handle_errored_block()
1180 if (sctx->readonly) { in scrub_handle_errored_block()
1181 ASSERT(!sctx->is_dev_replace); in scrub_handle_errored_block()
1188 * First try to pick the mirror which is completely without I/O in scrub_handle_errored_block()
1193 * If a mirror is found which is completely correct, and no in scrub_handle_errored_block()
1206 /* A raid56 mirror number can be more than BTRFS_MAX_MIRRORS */ in scrub_handle_errored_block()
1207 if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) { in scrub_handle_errored_block()
1210 if (!sblocks_for_recheck[mirror_index]->sector_count) in scrub_handle_errored_block()
1215 struct scrub_recover *r = sblock_bad->sectors[0]->recover; in scrub_handle_errored_block()
1216 int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs; in scrub_handle_errored_block()
1220 if (!sblocks_for_recheck[1]->sector_count) in scrub_handle_errored_block()
1225 sblock_other->mirror_num = 1 + mirror_index; in scrub_handle_errored_block()
1231 if (!sblock_other->header_error && in scrub_handle_errored_block()
1232 !sblock_other->checksum_error && in scrub_handle_errored_block()
1233 sblock_other->no_io_error_seen) { in scrub_handle_errored_block()
1234 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1246 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) in scrub_handle_errored_block()
1260 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector in scrub_handle_errored_block()
1261 * of mirror #2 is readable but the final checksum test fails, in scrub_handle_errored_block()
1262 * then the 2nd sector of mirror #3 could be tried, whether now in scrub_handle_errored_block()
1269 * mirror could be repaired by taking 512 byte of a different in scrub_handle_errored_block()
1270 * mirror, even if other 512 byte sectors in the same sectorsize in scrub_handle_errored_block()
1274 for (sector_num = 0; sector_num < sblock_bad->sector_count; in scrub_handle_errored_block()
1276 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num]; in scrub_handle_errored_block()
1279 /* Skip no-io-error sectors in scrub */ in scrub_handle_errored_block()
1280 if (!sector_bad->io_error && !sctx->is_dev_replace) in scrub_handle_errored_block()
1283 if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) { in scrub_handle_errored_block()
1286 * didn't work out correct data, then copy the content in scrub_handle_errored_block()
1288 * to source device, instead of writing garbage data in in scrub_handle_errored_block()
1292 } else if (sector_bad->io_error) { in scrub_handle_errored_block()
1293 /* Try to find no-io-error sector in mirrors */ in scrub_handle_errored_block()
1296 sblocks_for_recheck[mirror_index]->sector_count > 0; in scrub_handle_errored_block()
1298 if (!sblocks_for_recheck[mirror_index]-> in scrub_handle_errored_block()
1299 sectors[sector_num]->io_error) { in scrub_handle_errored_block()
1308 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1310 * Did not find a mirror to fetch the sector from. in scrub_handle_errored_block()
1312 * case (sector->io_error), by filling the block with in scrub_handle_errored_block()
1321 &fs_info->dev_replace.num_write_errors); in scrub_handle_errored_block()
1329 sector_bad->io_error = 0; in scrub_handle_errored_block()
1335 if (success && !sctx->is_dev_replace) { in scrub_handle_errored_block()
1340 * request for data to be repaired is on its way). in scrub_handle_errored_block()
1342 * which re-reads the data before the checksum in scrub_handle_errored_block()
1343 * is verified, but most likely the data comes out in scrub_handle_errored_block()
1347 if (!sblock_bad->header_error && in scrub_handle_errored_block()
1348 !sblock_bad->checksum_error && in scrub_handle_errored_block()
1349 sblock_bad->no_io_error_seen) in scrub_handle_errored_block()
1355 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1356 sctx->stat.corrected_errors++; in scrub_handle_errored_block()
1357 sblock_to_check->data_corrected = 1; in scrub_handle_errored_block()
1358 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1361 logical, rcu_str_deref(dev->name)); in scrub_handle_errored_block()
1365 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1366 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1367 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1370 logical, rcu_str_deref(dev->name)); in scrub_handle_errored_block()
1379 /* Not allocated, continue checking the next mirror */ in scrub_handle_errored_block()
1383 for (sector_index = 0; sector_index < sblock->sector_count; in scrub_handle_errored_block()
1389 recover = sblock->sectors[sector_index]->recover; in scrub_handle_errored_block()
1392 sblock->sectors[sector_index]->recover = NULL; in scrub_handle_errored_block()
1407 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5) in scrub_nr_raid_mirrors()
1409 else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) in scrub_nr_raid_mirrors()
1412 return (int)bioc->num_stripes; in scrub_nr_raid_mirrors()
1417 int nstripes, int mirror, in scrub_stripe_index_and_offset() argument
1436 *stripe_offset = logical - raid_map[i]; in scrub_stripe_index_and_offset()
1439 *stripe_index = mirror; in scrub_stripe_index_and_offset()
1447 struct scrub_ctx *sctx = original_sblock->sctx; in scrub_setup_recheck_block()
1448 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_setup_recheck_block()
1449 u64 logical = original_sblock->logical; in scrub_setup_recheck_block()
1450 u64 length = original_sblock->sector_count << fs_info->sectorsize_bits; in scrub_setup_recheck_block()
1451 u64 generation = original_sblock->sectors[0]->generation; in scrub_setup_recheck_block()
1452 u64 flags = original_sblock->sectors[0]->flags; in scrub_setup_recheck_block()
1453 u64 have_csum = original_sblock->sectors[0]->have_csum; in scrub_setup_recheck_block()
1466 sublen = min_t(u64, length, fs_info->sectorsize); in scrub_setup_recheck_block()
1472 * one mirror in scrub_setup_recheck_block()
1480 return -EIO; in scrub_setup_recheck_block()
1487 return -ENOMEM; in scrub_setup_recheck_block()
1490 refcount_set(&recover->refs, 1); in scrub_setup_recheck_block()
1491 recover->bioc = bioc; in scrub_setup_recheck_block()
1492 recover->map_length = mapped_length; in scrub_setup_recheck_block()
1504 sblock->sctx = sctx; in scrub_setup_recheck_block()
1508 spin_lock(&sctx->stat_lock); in scrub_setup_recheck_block()
1509 sctx->stat.malloc_errors++; in scrub_setup_recheck_block()
1510 spin_unlock(&sctx->stat_lock); in scrub_setup_recheck_block()
1512 return -ENOMEM; in scrub_setup_recheck_block()
1514 sector->flags = flags; in scrub_setup_recheck_block()
1515 sector->generation = generation; in scrub_setup_recheck_block()
1516 sector->have_csum = have_csum; in scrub_setup_recheck_block()
1518 memcpy(sector->csum, in scrub_setup_recheck_block()
1519 original_sblock->sectors[0]->csum, in scrub_setup_recheck_block()
1520 sctx->fs_info->csum_size); in scrub_setup_recheck_block()
1523 bioc->map_type, in scrub_setup_recheck_block()
1524 bioc->raid_map, in scrub_setup_recheck_block()
1525 bioc->num_stripes - in scrub_setup_recheck_block()
1526 bioc->num_tgtdevs, in scrub_setup_recheck_block()
1535 sblock->physical = in scrub_setup_recheck_block()
1536 bioc->stripes[stripe_index].physical + in scrub_setup_recheck_block()
1538 sblock->dev = bioc->stripes[stripe_index].dev; in scrub_setup_recheck_block()
1539 sblock->physical_for_dev_replace = in scrub_setup_recheck_block()
1540 original_sblock->physical_for_dev_replace; in scrub_setup_recheck_block()
1543 BUG_ON(sector_index >= original_sblock->sector_count); in scrub_setup_recheck_block()
1545 sector->recover = recover; in scrub_setup_recheck_block()
1548 length -= sublen; in scrub_setup_recheck_block()
1558 complete(bio->bi_private); in scrub_bio_wait_endio()
1567 bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >> in scrub_submit_raid56_bio_wait()
1569 bio->bi_private = &done; in scrub_submit_raid56_bio_wait()
1570 bio->bi_end_io = scrub_bio_wait_endio; in scrub_submit_raid56_bio_wait()
1571 raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num); in scrub_submit_raid56_bio_wait()
1574 return blk_status_to_errno(bio->bi_status); in scrub_submit_raid56_bio_wait()
1580 struct scrub_sector *first_sector = sblock->sectors[0]; in scrub_recheck_block_on_raid56()
1585 ASSERT(sblock->dev); in scrub_recheck_block_on_raid56()
1586 if (!sblock->dev->bdev) in scrub_recheck_block_on_raid56()
1589 bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS); in scrub_recheck_block_on_raid56()
1591 for (i = 0; i < sblock->sector_count; i++) { in scrub_recheck_block_on_raid56()
1592 struct scrub_sector *sector = sblock->sectors[i]; in scrub_recheck_block_on_raid56()
1594 bio_add_scrub_sector(bio, sector, fs_info->sectorsize); in scrub_recheck_block_on_raid56()
1608 for (i = 0; i < sblock->sector_count; i++) in scrub_recheck_block_on_raid56()
1609 sblock->sectors[i]->io_error = 1; in scrub_recheck_block_on_raid56()
1611 sblock->no_io_error_seen = 0; in scrub_recheck_block_on_raid56()
1615 * This function will check the on disk data for checksum errors, header errors
1619 * are errored in the just handled mirror can be repaired.
1627 sblock->no_io_error_seen = 1; in scrub_recheck_block()
1630 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0])) in scrub_recheck_block()
1633 for (i = 0; i < sblock->sector_count; i++) { in scrub_recheck_block()
1634 struct scrub_sector *sector = sblock->sectors[i]; in scrub_recheck_block()
1638 if (sblock->dev->bdev == NULL) { in scrub_recheck_block()
1639 sector->io_error = 1; in scrub_recheck_block()
1640 sblock->no_io_error_seen = 0; in scrub_recheck_block()
1644 bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ); in scrub_recheck_block()
1645 bio_add_scrub_sector(&bio, sector, fs_info->sectorsize); in scrub_recheck_block()
1646 bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >> in scrub_recheck_block()
1651 sector->io_error = 1; in scrub_recheck_block()
1652 sblock->no_io_error_seen = 0; in scrub_recheck_block()
1658 if (sblock->no_io_error_seen) in scrub_recheck_block()
1664 struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices; in scrub_check_fsid()
1667 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); in scrub_check_fsid()
1673 sblock->header_error = 0; in scrub_recheck_block_checksum()
1674 sblock->checksum_error = 0; in scrub_recheck_block_checksum()
1675 sblock->generation_error = 0; in scrub_recheck_block_checksum()
1677 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA) in scrub_recheck_block_checksum()
1689 for (i = 0; i < sblock_bad->sector_count; i++) { in scrub_repair_block_from_good_copy()
1705 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num]; in scrub_repair_sector_from_good_copy()
1706 struct scrub_sector *sector_good = sblock_good->sectors[sector_num]; in scrub_repair_sector_from_good_copy()
1707 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; in scrub_repair_sector_from_good_copy()
1708 const u32 sectorsize = fs_info->sectorsize; in scrub_repair_sector_from_good_copy()
1710 if (force_write || sblock_bad->header_error || in scrub_repair_sector_from_good_copy()
1711 sblock_bad->checksum_error || sector_bad->io_error) { in scrub_repair_sector_from_good_copy()
1716 if (!sblock_bad->dev->bdev) { in scrub_repair_sector_from_good_copy()
1719 return -EIO; in scrub_repair_sector_from_good_copy()
1722 bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE); in scrub_repair_sector_from_good_copy()
1723 bio.bi_iter.bi_sector = (sblock_bad->physical + in scrub_repair_sector_from_good_copy()
1724 sector_bad->offset) >> SECTOR_SHIFT; in scrub_repair_sector_from_good_copy()
1732 btrfs_dev_stat_inc_and_print(sblock_bad->dev, in scrub_repair_sector_from_good_copy()
1734 atomic64_inc(&fs_info->dev_replace.num_write_errors); in scrub_repair_sector_from_good_copy()
1735 return -EIO; in scrub_repair_sector_from_good_copy()
1744 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_write_block_to_dev_replace()
1749 * so the data needn't be written into the destination device. in scrub_write_block_to_dev_replace()
1751 if (sblock->sparity) in scrub_write_block_to_dev_replace()
1754 for (i = 0; i < sblock->sector_count; i++) { in scrub_write_block_to_dev_replace()
1759 atomic64_inc(&fs_info->dev_replace.num_write_errors); in scrub_write_block_to_dev_replace()
1765 const u32 sectorsize = sblock->sctx->fs_info->sectorsize; in scrub_write_sector_to_dev_replace()
1766 struct scrub_sector *sector = sblock->sectors[sector_num]; in scrub_write_sector_to_dev_replace()
1768 if (sector->io_error) in scrub_write_sector_to_dev_replace()
1771 return scrub_add_sector_to_wr_bio(sblock->sctx, sector); in scrub_write_sector_to_dev_replace()
1779 if (!btrfs_is_zoned(sctx->fs_info)) in fill_writer_pointer_gap()
1782 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) in fill_writer_pointer_gap()
1785 if (sctx->write_pointer < physical) { in fill_writer_pointer_gap()
1786 length = physical - sctx->write_pointer; in fill_writer_pointer_gap()
1788 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, in fill_writer_pointer_gap()
1789 sctx->write_pointer, length); in fill_writer_pointer_gap()
1791 sctx->write_pointer = physical; in fill_writer_pointer_gap()
1798 refcount_inc(&sblock->refs); in scrub_block_get()
1804 struct scrub_block *sblock = sector->sblock; in scrub_add_sector_to_wr_bio()
1807 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_add_sector_to_wr_bio()
1809 mutex_lock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1811 if (!sctx->wr_curr_bio) { in scrub_add_sector_to_wr_bio()
1812 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), in scrub_add_sector_to_wr_bio()
1814 if (!sctx->wr_curr_bio) { in scrub_add_sector_to_wr_bio()
1815 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1816 return -ENOMEM; in scrub_add_sector_to_wr_bio()
1818 sctx->wr_curr_bio->sctx = sctx; in scrub_add_sector_to_wr_bio()
1819 sctx->wr_curr_bio->sector_count = 0; in scrub_add_sector_to_wr_bio()
1821 sbio = sctx->wr_curr_bio; in scrub_add_sector_to_wr_bio()
1822 if (sbio->sector_count == 0) { in scrub_add_sector_to_wr_bio()
1823 ret = fill_writer_pointer_gap(sctx, sector->offset + in scrub_add_sector_to_wr_bio()
1824 sblock->physical_for_dev_replace); in scrub_add_sector_to_wr_bio()
1826 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1830 sbio->physical = sblock->physical_for_dev_replace + sector->offset; in scrub_add_sector_to_wr_bio()
1831 sbio->logical = sblock->logical + sector->offset; in scrub_add_sector_to_wr_bio()
1832 sbio->dev = sctx->wr_tgtdev; in scrub_add_sector_to_wr_bio()
1833 if (!sbio->bio) { in scrub_add_sector_to_wr_bio()
1834 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio, in scrub_add_sector_to_wr_bio()
1837 sbio->bio->bi_private = sbio; in scrub_add_sector_to_wr_bio()
1838 sbio->bio->bi_end_io = scrub_wr_bio_end_io; in scrub_add_sector_to_wr_bio()
1839 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_sector_to_wr_bio()
1840 sbio->status = 0; in scrub_add_sector_to_wr_bio()
1841 } else if (sbio->physical + sbio->sector_count * sectorsize != in scrub_add_sector_to_wr_bio()
1842 sblock->physical_for_dev_replace + sector->offset || in scrub_add_sector_to_wr_bio()
1843 sbio->logical + sbio->sector_count * sectorsize != in scrub_add_sector_to_wr_bio()
1844 sblock->logical + sector->offset) { in scrub_add_sector_to_wr_bio()
1849 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize); in scrub_add_sector_to_wr_bio()
1851 if (sbio->sector_count < 1) { in scrub_add_sector_to_wr_bio()
1852 bio_put(sbio->bio); in scrub_add_sector_to_wr_bio()
1853 sbio->bio = NULL; in scrub_add_sector_to_wr_bio()
1854 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1855 return -EIO; in scrub_add_sector_to_wr_bio()
1861 sbio->sectors[sbio->sector_count] = sector; in scrub_add_sector_to_wr_bio()
1868 scrub_block_get(sector->sblock); in scrub_add_sector_to_wr_bio()
1870 sbio->sector_count++; in scrub_add_sector_to_wr_bio()
1871 if (sbio->sector_count == sctx->sectors_per_bio) in scrub_add_sector_to_wr_bio()
1873 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1882 if (!sctx->wr_curr_bio) in scrub_wr_submit()
1885 sbio = sctx->wr_curr_bio; in scrub_wr_submit()
1886 sctx->wr_curr_bio = NULL; in scrub_wr_submit()
1892 btrfsic_check_bio(sbio->bio); in scrub_wr_submit()
1893 submit_bio(sbio->bio); in scrub_wr_submit()
1895 if (btrfs_is_zoned(sctx->fs_info)) in scrub_wr_submit()
1896 sctx->write_pointer = sbio->physical + sbio->sector_count * in scrub_wr_submit()
1897 sctx->fs_info->sectorsize; in scrub_wr_submit()
1902 struct scrub_bio *sbio = bio->bi_private; in scrub_wr_bio_end_io()
1903 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; in scrub_wr_bio_end_io()
1905 sbio->status = bio->bi_status; in scrub_wr_bio_end_io()
1906 sbio->bio = bio; in scrub_wr_bio_end_io()
1908 INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker); in scrub_wr_bio_end_io()
1909 queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); in scrub_wr_bio_end_io()
1915 struct scrub_ctx *sctx = sbio->sctx; in scrub_wr_bio_end_io_worker()
1918 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO); in scrub_wr_bio_end_io_worker()
1919 if (sbio->status) { in scrub_wr_bio_end_io_worker()
1921 &sbio->sctx->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1923 for (i = 0; i < sbio->sector_count; i++) { in scrub_wr_bio_end_io_worker()
1924 struct scrub_sector *sector = sbio->sectors[i]; in scrub_wr_bio_end_io_worker()
1926 sector->io_error = 1; in scrub_wr_bio_end_io_worker()
1927 atomic64_inc(&dev_replace->num_write_errors); in scrub_wr_bio_end_io_worker()
1935 for (i = 0; i < sbio->sector_count; i++) { in scrub_wr_bio_end_io_worker()
1936 scrub_block_put(sbio->sectors[i]->sblock); in scrub_wr_bio_end_io_worker()
1937 scrub_sector_put(sbio->sectors[i]); in scrub_wr_bio_end_io_worker()
1940 bio_put(sbio->bio); in scrub_wr_bio_end_io_worker()
1958 sblock->header_error = 0; in scrub_checksum()
1959 sblock->generation_error = 0; in scrub_checksum()
1960 sblock->checksum_error = 0; in scrub_checksum()
1962 WARN_ON(sblock->sector_count < 1); in scrub_checksum()
1963 flags = sblock->sectors[0]->flags; in scrub_checksum()
1981 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_data()
1982 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_data()
1983 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_checksum_data()
1988 BUG_ON(sblock->sector_count < 1); in scrub_checksum_data()
1989 sector = sblock->sectors[0]; in scrub_checksum_data()
1990 if (!sector->have_csum) in scrub_checksum_data()
1995 shash->tfm = fs_info->csum_shash; in scrub_checksum_data()
1998 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); in scrub_checksum_data()
2000 if (memcmp(csum, sector->csum, fs_info->csum_size)) in scrub_checksum_data()
2001 sblock->checksum_error = 1; in scrub_checksum_data()
2002 return sblock->checksum_error; in scrub_checksum_data()
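scrub_checksum_data() recomputes the checksum of the data sector and compares it with the csum stored in the scrub_sector. For btrfs' default checksum, crc32c, the equivalent computation is tiny; the sketch below uses a bitwise crc32c purely for clarity and only demonstrates the verify step, not the kernel's crypto_shash path:

    /* Sketch: verify one data sector the way scrub does, using crc32c
     * (btrfs' default checksum). Bitwise implementation, for clarity only. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t crc32c(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = 0xffffffffu;

        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78u : 0);
        }
        return ~crc;
    }

    int main(void)
    {
        uint8_t sector[4096];
        uint32_t stored, computed;

        memset(sector, 0xab, sizeof(sector));    /* pretend on-disk data */
        stored = crc32c(sector, sizeof(sector)); /* csum from the csum tree */

        sector[100] ^= 0x01;                     /* simulate a bit flip */
        computed = crc32c(sector, sizeof(sector));

        printf("stored %08x, computed %08x -> %s\n", stored, computed,
               computed == stored ? "ok" : "checksum_error");
        return 0;
    }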
2007 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_tree_block()
2009 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_tree_block()
2010 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_checksum_tree_block()
2016 * to change so we don't misuse data and metadata units like that. in scrub_checksum_tree_block()
2018 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_checksum_tree_block()
2019 const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_checksum_tree_block()
2024 BUG_ON(sblock->sector_count < 1); in scrub_checksum_tree_block()
2027 ASSERT(sblock->sector_count == num_sectors); in scrub_checksum_tree_block()
2029 sector = sblock->sectors[0]; in scrub_checksum_tree_block()
2032 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size); in scrub_checksum_tree_block()
2039 if (sblock->logical != btrfs_stack_header_bytenr(h)) in scrub_checksum_tree_block()
2040 sblock->header_error = 1; in scrub_checksum_tree_block()
2042 if (sector->generation != btrfs_stack_header_generation(h)) { in scrub_checksum_tree_block()
2043 sblock->header_error = 1; in scrub_checksum_tree_block()
2044 sblock->generation_error = 1; in scrub_checksum_tree_block()
2047 if (!scrub_check_fsid(h->fsid, sector)) in scrub_checksum_tree_block()
2048 sblock->header_error = 1; in scrub_checksum_tree_block()
2050 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, in scrub_checksum_tree_block()
2052 sblock->header_error = 1; in scrub_checksum_tree_block()
2054 shash->tfm = fs_info->csum_shash; in scrub_checksum_tree_block()
2057 sectorsize - BTRFS_CSUM_SIZE); in scrub_checksum_tree_block()
2060 kaddr = scrub_sector_get_kaddr(sblock->sectors[i]); in scrub_checksum_tree_block()
2065 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) in scrub_checksum_tree_block()
2066 sblock->checksum_error = 1; in scrub_checksum_tree_block()
2068 return sblock->header_error || sblock->checksum_error; in scrub_checksum_tree_block()
2074 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_super()
2075 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_super()
2076 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_checksum_super()
2083 BUG_ON(sblock->sector_count < 1); in scrub_checksum_super()
2084 sector = sblock->sectors[0]; in scrub_checksum_super()
2088 if (sblock->logical != btrfs_super_bytenr(s)) in scrub_checksum_super()
2091 if (sector->generation != btrfs_super_generation(s)) in scrub_checksum_super()
2094 if (!scrub_check_fsid(s->fsid, sector)) in scrub_checksum_super()
2097 shash->tfm = fs_info->csum_shash; in scrub_checksum_super()
2100 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum); in scrub_checksum_super()
2102 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size)) in scrub_checksum_super()
2110 if (refcount_dec_and_test(&sblock->refs)) { in scrub_block_put()
2113 if (sblock->sparity) in scrub_block_put()
2114 scrub_parity_put(sblock->sparity); in scrub_block_put()
2116 for (i = 0; i < sblock->sector_count; i++) in scrub_block_put()
2117 scrub_sector_put(sblock->sectors[i]); in scrub_block_put()
2118 for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) { in scrub_block_put()
2119 if (sblock->pages[i]) { in scrub_block_put()
2120 detach_scrub_page_private(sblock->pages[i]); in scrub_block_put()
2121 __free_page(sblock->pages[i]); in scrub_block_put()
2130 atomic_inc(&sector->refs); in scrub_sector_get()
2135 if (atomic_dec_and_test(&sector->refs)) in scrub_sector_put()
2140 * Throttling of IO submission, bandwidth-limit based; the timeslice is 1 second.
2153 sbio = sctx->bios[sctx->curr]; in scrub_throttle()
2154 device = sbio->dev; in scrub_throttle()
2155 bwlimit = READ_ONCE(device->scrub_speed_max); in scrub_throttle()
2168 if (sctx->throttle_deadline == 0) { in scrub_throttle()
2169 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); in scrub_throttle()
2170 sctx->throttle_sent = 0; in scrub_throttle()
2174 if (ktime_before(now, sctx->throttle_deadline)) { in scrub_throttle()
2176 sctx->throttle_sent += sbio->bio->bi_iter.bi_size; in scrub_throttle()
2177 if (sctx->throttle_sent <= div_u64(bwlimit, div)) in scrub_throttle()
2181 delta = ktime_ms_delta(sctx->throttle_deadline, now); in scrub_throttle()
2195 sctx->throttle_deadline = 0; in scrub_throttle()
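scrub_throttle() enforces the per-device bandwidth limit with one-second timeslices: it counts the bytes submitted in the current slice and, once the budget is spent, sleeps until the slice's deadline before the next bio goes out. A user-space sketch of the same idea; the kernel additionally splits each second into several sub-slices (the div factor), which is simplified away here:

    /* Sketch: simple time-slice bandwidth throttle, one 1-second slice.
     * throttle(n) accounts n bytes and sleeps when the per-second budget
     * is used up, mirroring the deadline logic in scrub_throttle(). */
    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t bwlimit = 8 * 1024 * 1024;  /* example: 8 MiB/s */
    static uint64_t sent;                       /* bytes sent in this slice */
    static struct timespec deadline;            /* end of current slice */

    static void throttle(uint64_t bytes)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        if (deadline.tv_sec == 0 ||
            now.tv_sec > deadline.tv_sec ||
            (now.tv_sec == deadline.tv_sec && now.tv_nsec >= deadline.tv_nsec)) {
            /* Start a new 1-second slice. */
            deadline = now;
            deadline.tv_sec += 1;
            sent = 0;
        }
        sent += bytes;
        if (sent <= bwlimit)
            return;                  /* still within budget */
        /* Budget exhausted: sleep until the slice's deadline. */
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
        deadline.tv_sec = 0;         /* force a fresh slice next time */
        sent = 0;
    }

    int main(void)
    {
        for (int i = 0; i < 64; i++)
            throttle(1024 * 1024);   /* pretend to submit 1 MiB bios */
        puts("done");
        return 0;
    }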
2202 if (sctx->curr == -1) in scrub_submit()
2207 sbio = sctx->bios[sctx->curr]; in scrub_submit()
2208 sctx->curr = -1; in scrub_submit()
2210 btrfsic_check_bio(sbio->bio); in scrub_submit()
2211 submit_bio(sbio->bio); in scrub_submit()
2217 struct scrub_block *sblock = sector->sblock; in scrub_add_sector_to_rd_bio()
2219 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_add_sector_to_rd_bio()
2226 while (sctx->curr == -1) { in scrub_add_sector_to_rd_bio()
2227 spin_lock(&sctx->list_lock); in scrub_add_sector_to_rd_bio()
2228 sctx->curr = sctx->first_free; in scrub_add_sector_to_rd_bio()
2229 if (sctx->curr != -1) { in scrub_add_sector_to_rd_bio()
2230 sctx->first_free = sctx->bios[sctx->curr]->next_free; in scrub_add_sector_to_rd_bio()
2231 sctx->bios[sctx->curr]->next_free = -1; in scrub_add_sector_to_rd_bio()
2232 sctx->bios[sctx->curr]->sector_count = 0; in scrub_add_sector_to_rd_bio()
2233 spin_unlock(&sctx->list_lock); in scrub_add_sector_to_rd_bio()
2235 spin_unlock(&sctx->list_lock); in scrub_add_sector_to_rd_bio()
2236 wait_event(sctx->list_wait, sctx->first_free != -1); in scrub_add_sector_to_rd_bio()
2239 sbio = sctx->bios[sctx->curr]; in scrub_add_sector_to_rd_bio()
2240 if (sbio->sector_count == 0) { in scrub_add_sector_to_rd_bio()
2241 sbio->physical = sblock->physical + sector->offset; in scrub_add_sector_to_rd_bio()
2242 sbio->logical = sblock->logical + sector->offset; in scrub_add_sector_to_rd_bio()
2243 sbio->dev = sblock->dev; in scrub_add_sector_to_rd_bio()
2244 if (!sbio->bio) { in scrub_add_sector_to_rd_bio()
2245 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio, in scrub_add_sector_to_rd_bio()
2248 sbio->bio->bi_private = sbio; in scrub_add_sector_to_rd_bio()
2249 sbio->bio->bi_end_io = scrub_bio_end_io; in scrub_add_sector_to_rd_bio()
2250 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_sector_to_rd_bio()
2251 sbio->status = 0; in scrub_add_sector_to_rd_bio()
2252 } else if (sbio->physical + sbio->sector_count * sectorsize != in scrub_add_sector_to_rd_bio()
2253 sblock->physical + sector->offset || in scrub_add_sector_to_rd_bio()
2254 sbio->logical + sbio->sector_count * sectorsize != in scrub_add_sector_to_rd_bio()
2255 sblock->logical + sector->offset || in scrub_add_sector_to_rd_bio()
2256 sbio->dev != sblock->dev) { in scrub_add_sector_to_rd_bio()
2261 sbio->sectors[sbio->sector_count] = sector; in scrub_add_sector_to_rd_bio()
2262 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize); in scrub_add_sector_to_rd_bio()
2264 if (sbio->sector_count < 1) { in scrub_add_sector_to_rd_bio()
2265 bio_put(sbio->bio); in scrub_add_sector_to_rd_bio()
2266 sbio->bio = NULL; in scrub_add_sector_to_rd_bio()
2267 return -EIO; in scrub_add_sector_to_rd_bio()
2274 atomic_inc(&sblock->outstanding_sectors); in scrub_add_sector_to_rd_bio()
2275 sbio->sector_count++; in scrub_add_sector_to_rd_bio()
2276 if (sbio->sector_count == sctx->sectors_per_bio) in scrub_add_sector_to_rd_bio()
2284 struct scrub_block *sblock = bio->bi_private; in scrub_missing_raid56_end_io()
2285 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_missing_raid56_end_io()
2288 if (bio->bi_status) in scrub_missing_raid56_end_io()
2289 sblock->no_io_error_seen = 0; in scrub_missing_raid56_end_io()
2293 queue_work(fs_info->scrub_workers, &sblock->work); in scrub_missing_raid56_end_io()
2299 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_worker()
2300 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_worker()
2304 logical = sblock->logical; in scrub_missing_raid56_worker()
2305 dev = sblock->dev; in scrub_missing_raid56_worker()
2307 if (sblock->no_io_error_seen) in scrub_missing_raid56_worker()
2310 if (!sblock->no_io_error_seen) { in scrub_missing_raid56_worker()
2311 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2312 sctx->stat.read_errors++; in scrub_missing_raid56_worker()
2313 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2316 logical, rcu_str_deref(dev->name)); in scrub_missing_raid56_worker()
2317 } else if (sblock->header_error || sblock->checksum_error) { in scrub_missing_raid56_worker()
2318 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2319 sctx->stat.uncorrectable_errors++; in scrub_missing_raid56_worker()
2320 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2323 logical, rcu_str_deref(dev->name)); in scrub_missing_raid56_worker()
2328 if (sctx->is_dev_replace && sctx->flush_all_writes) { in scrub_missing_raid56_worker()
2329 mutex_lock(&sctx->wr_lock); in scrub_missing_raid56_worker()
2331 mutex_unlock(&sctx->wr_lock); in scrub_missing_raid56_worker()
2340 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_pages()
2341 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_pages()
2342 u64 length = sblock->sector_count << fs_info->sectorsize_bits; in scrub_missing_raid56_pages()
2343 u64 logical = sblock->logical; in scrub_missing_raid56_pages()
2353 if (ret || !bioc || !bioc->raid_map) in scrub_missing_raid56_pages()
2356 if (WARN_ON(!sctx->is_dev_replace || in scrub_missing_raid56_pages()
2357 !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { in scrub_missing_raid56_pages()
2368 bio->bi_iter.bi_sector = logical >> 9; in scrub_missing_raid56_pages()
2369 bio->bi_private = sblock; in scrub_missing_raid56_pages()
2370 bio->bi_end_io = scrub_missing_raid56_end_io; in scrub_missing_raid56_pages()
2376 for (i = 0; i < sblock->sector_count; i++) { in scrub_missing_raid56_pages()
2377 struct scrub_sector *sector = sblock->sectors[i]; in scrub_missing_raid56_pages()
2381 sector->offset + sector->sblock->logical); in scrub_missing_raid56_pages()
2384 INIT_WORK(&sblock->work, scrub_missing_raid56_worker); in scrub_missing_raid56_pages()
2396 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2397 sctx->stat.malloc_errors++; in scrub_missing_raid56_pages()
2398 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2407 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_sectors()
2413 spin_lock(&sctx->stat_lock); in scrub_sectors()
2414 sctx->stat.malloc_errors++; in scrub_sectors()
2415 spin_unlock(&sctx->stat_lock); in scrub_sectors()
2416 return -ENOMEM; in scrub_sectors()
2430 spin_lock(&sctx->stat_lock); in scrub_sectors()
2431 sctx->stat.malloc_errors++; in scrub_sectors()
2432 spin_unlock(&sctx->stat_lock); in scrub_sectors()
2434 return -ENOMEM; in scrub_sectors()
2436 sector->flags = flags; in scrub_sectors()
2437 sector->generation = gen; in scrub_sectors()
2439 sector->have_csum = 1; in scrub_sectors()
2440 memcpy(sector->csum, csum, sctx->fs_info->csum_size); in scrub_sectors()
2442 sector->have_csum = 0; in scrub_sectors()
2444 len -= l; in scrub_sectors()
2450 WARN_ON(sblock->sector_count == 0); in scrub_sectors()
2451 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { in scrub_sectors()
2458 for (index = 0; index < sblock->sector_count; index++) { in scrub_sectors()
2459 struct scrub_sector *sector = sblock->sectors[index]; in scrub_sectors()
2480 struct scrub_bio *sbio = bio->bi_private; in scrub_bio_end_io()
2481 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; in scrub_bio_end_io()
2483 sbio->status = bio->bi_status; in scrub_bio_end_io()
2484 sbio->bio = bio; in scrub_bio_end_io()
2486 queue_work(fs_info->scrub_workers, &sbio->work); in scrub_bio_end_io()
2492 struct scrub_ctx *sctx = sbio->sctx; in scrub_bio_end_io_worker()
2495 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO); in scrub_bio_end_io_worker()
2496 if (sbio->status) { in scrub_bio_end_io_worker()
2497 for (i = 0; i < sbio->sector_count; i++) { in scrub_bio_end_io_worker()
2498 struct scrub_sector *sector = sbio->sectors[i]; in scrub_bio_end_io_worker()
2500 sector->io_error = 1; in scrub_bio_end_io_worker()
2501 sector->sblock->no_io_error_seen = 0; in scrub_bio_end_io_worker()
2506 for (i = 0; i < sbio->sector_count; i++) { in scrub_bio_end_io_worker()
2507 struct scrub_sector *sector = sbio->sectors[i]; in scrub_bio_end_io_worker()
2508 struct scrub_block *sblock = sector->sblock; in scrub_bio_end_io_worker()
2510 if (atomic_dec_and_test(&sblock->outstanding_sectors)) in scrub_bio_end_io_worker()
2515 bio_put(sbio->bio); in scrub_bio_end_io_worker()
2516 sbio->bio = NULL; in scrub_bio_end_io_worker()
2517 spin_lock(&sctx->list_lock); in scrub_bio_end_io_worker()
2518 sbio->next_free = sctx->first_free; in scrub_bio_end_io_worker()
2519 sctx->first_free = sbio->index; in scrub_bio_end_io_worker()
2520 spin_unlock(&sctx->list_lock); in scrub_bio_end_io_worker()
2522 if (sctx->is_dev_replace && sctx->flush_all_writes) { in scrub_bio_end_io_worker()
2523 mutex_lock(&sctx->wr_lock); in scrub_bio_end_io_worker()
2525 mutex_unlock(&sctx->wr_lock); in scrub_bio_end_io_worker()
2537 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits; in __scrub_mark_bitmap()
2539 if (len >= sparity->stripe_len) { in __scrub_mark_bitmap()
2540 bitmap_set(bitmap, 0, sparity->nsectors); in __scrub_mark_bitmap()
2544 start -= sparity->logic_start; in __scrub_mark_bitmap()
2545 start = div64_u64_rem(start, sparity->stripe_len, &offset); in __scrub_mark_bitmap()
2549 if (offset + nsectors <= sparity->nsectors) { in __scrub_mark_bitmap()
2554 bitmap_set(bitmap, offset, sparity->nsectors - offset); in __scrub_mark_bitmap()
2555 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); in __scrub_mark_bitmap()
2561 __scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len); in scrub_parity_mark_sectors_error()
2567 __scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len); in scrub_parity_mark_sectors_data()
2574 if (!sblock->no_io_error_seen) { in scrub_block_complete()
2584 if (!corrupted && sblock->sctx->is_dev_replace) in scrub_block_complete()
2588 if (sblock->sparity && corrupted && !sblock->data_corrected) { in scrub_block_complete()
2589 u64 start = sblock->logical; in scrub_block_complete()
2590 u64 end = sblock->logical + in scrub_block_complete()
2591 sblock->sectors[sblock->sector_count - 1]->offset + in scrub_block_complete()
2592 sblock->sctx->fs_info->sectorsize; in scrub_block_complete()
2594 ASSERT(end - start <= U32_MAX); in scrub_block_complete()
2595 scrub_parity_mark_sectors_error(sblock->sparity, in scrub_block_complete()
2596 start, end - start); in scrub_block_complete()
2602 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits; in drop_csum_range()
2603 list_del(&sum->list); in drop_csum_range()
2611 * The search source is sctx->csum_list, which is a pre-populated list
2622 while (!list_empty(&sctx->csum_list)) { in scrub_find_csum()
2627 sum = list_first_entry(&sctx->csum_list, in scrub_find_csum()
2630 if (sum->bytenr > logical) in scrub_find_csum()
2639 if (sum->bytenr + sum->len <= logical) { in scrub_find_csum()
2646 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits; in scrub_find_csum()
2647 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits; in scrub_find_csum()
2649 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size, in scrub_find_csum()
2650 sctx->fs_info->csum_size); in scrub_find_csum()
2653 if (index == num_sectors - 1) in scrub_find_csum()
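scrub_find_csum() consumes the bytenr-ordered csum_list: ranges that end before the wanted logical address are dropped, a range starting beyond it means the sector has no checksum, and otherwise the per-sector checksum is copied out by index into the range. The same lookup over a plain sorted array, as a rough stand-alone illustration (4-byte crc32c checksums and 4 KiB sectors are assumed):

    /* Sketch: look up the checksum of one sector in an ordered list of
     * checksum ranges, the way scrub_find_csum() consumes sctx->csum_list. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define SECTORSIZE  4096u
    #define CSUM_SIZE   4u          /* crc32c */

    struct csum_range {
        uint64_t bytenr;            /* start of the covered range */
        uint64_t len;               /* length in bytes */
        uint8_t sums[64];           /* one CSUM_SIZE entry per sector */
    };

    static int find_csum(struct csum_range *ranges, int nranges,
                         uint64_t logical, uint8_t *csum)
    {
        for (int i = 0; i < nranges; i++) {
            struct csum_range *r = &ranges[i];
            uint64_t index;

            if (r->bytenr + r->len <= logical)
                continue;           /* range ends before us, drop it */
            if (r->bytenr > logical)
                return 0;           /* hole: no csum for this sector */
            index = (logical - r->bytenr) / SECTORSIZE;
            memcpy(csum, r->sums + index * CSUM_SIZE, CSUM_SIZE);
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct csum_range r = { .bytenr = 1 << 20, .len = 4 * SECTORSIZE };
        uint8_t csum[CSUM_SIZE];

        memcpy(&r.sums[2 * CSUM_SIZE], "\xde\xad\xbe\xef", CSUM_SIZE);
        if (find_csum(&r, 1, (1 << 20) + 2 * SECTORSIZE, csum))
            printf("csum for sector 2: %02x%02x%02x%02x\n",
                   csum[0], csum[1], csum[2], csum[3]);
        return 0;
    }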
2676 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in scrub_extent()
2677 blocksize = map->stripe_len; in scrub_extent()
2679 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2680 spin_lock(&sctx->stat_lock); in scrub_extent()
2681 sctx->stat.data_extents_scrubbed++; in scrub_extent()
2682 sctx->stat.data_bytes_scrubbed += len; in scrub_extent()
2683 spin_unlock(&sctx->stat_lock); in scrub_extent()
2685 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in scrub_extent()
2686 blocksize = map->stripe_len; in scrub_extent()
2688 blocksize = sctx->fs_info->nodesize; in scrub_extent()
2689 spin_lock(&sctx->stat_lock); in scrub_extent()
2690 sctx->stat.tree_extents_scrubbed++; in scrub_extent()
2691 sctx->stat.tree_bytes_scrubbed += len; in scrub_extent()
2692 spin_unlock(&sctx->stat_lock); in scrub_extent()
2694 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2699 * For the dev-replace case, we can have @dev being a missing device. in scrub_extent()
2705 * So here we change the read source to a good mirror. in scrub_extent()
2707 if (sctx->is_dev_replace && !dev->bdev) in scrub_extent()
2708 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical, in scrub_extent()
2718 ++sctx->stat.no_csum; in scrub_extent()
2725 len -= l; in scrub_extent()
2738 struct scrub_ctx *sctx = sparity->sctx; in scrub_sectors_for_parity()
2740 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_sectors_for_parity()
2747 spin_lock(&sctx->stat_lock); in scrub_sectors_for_parity()
2748 sctx->stat.malloc_errors++; in scrub_sectors_for_parity()
2749 spin_unlock(&sctx->stat_lock); in scrub_sectors_for_parity()
2750 return -ENOMEM; in scrub_sectors_for_parity()
2753 sblock->sparity = sparity; in scrub_sectors_for_parity()
2761 spin_lock(&sctx->stat_lock); in scrub_sectors_for_parity()
2762 sctx->stat.malloc_errors++; in scrub_sectors_for_parity()
2763 spin_unlock(&sctx->stat_lock); in scrub_sectors_for_parity()
2765 return -ENOMEM; in scrub_sectors_for_parity()
2767 sblock->sectors[index] = sector; in scrub_sectors_for_parity()
2770 list_add_tail(&sector->list, &sparity->sectors_list); in scrub_sectors_for_parity()
2771 sector->flags = flags; in scrub_sectors_for_parity()
2772 sector->generation = gen; in scrub_sectors_for_parity()
2774 sector->have_csum = 1; in scrub_sectors_for_parity()
2775 memcpy(sector->csum, csum, sctx->fs_info->csum_size); in scrub_sectors_for_parity()
2777 sector->have_csum = 0; in scrub_sectors_for_parity()
2781 len -= sectorsize; in scrub_sectors_for_parity()
2786 WARN_ON(sblock->sector_count == 0); in scrub_sectors_for_parity()
2787 for (index = 0; index < sblock->sector_count; index++) { in scrub_sectors_for_parity()
2788 struct scrub_sector *sector = sblock->sectors[index]; in scrub_sectors_for_parity()
2808 struct scrub_ctx *sctx = sparity->sctx; in scrub_extent_for_parity()
2813 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { in scrub_extent_for_parity()
2819 blocksize = sparity->stripe_len; in scrub_extent_for_parity()
2821 blocksize = sparity->stripe_len; in scrub_extent_for_parity()
2823 blocksize = sctx->fs_info->sectorsize; in scrub_extent_for_parity()
2843 len -= l; in scrub_extent_for_parity()
2853 * the left-most data stripe's logical offset.
2855 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2869 last_offset = (physical - map->stripes[num].physical) * data_stripes; in get_raid56_logic_offset()
2875 *offset = last_offset + i * map->stripe_len; in get_raid56_logic_offset()
2877 stripe_nr = div64_u64(*offset, map->stripe_len); in get_raid56_logic_offset()
2880 /* Work out the disk rotation on this stripe-set */ in get_raid56_logic_offset()
2881 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); in get_raid56_logic_offset()
2882 /* Calculate which stripe this data is located on */ in get_raid56_logic_offset()
2884 stripe_index = rot % map->num_stripes; in get_raid56_logic_offset()
2890 *offset = last_offset + j * map->stripe_len; in get_raid56_logic_offset()
2896 struct scrub_ctx *sctx = sparity->sctx; in scrub_free_parity()
2900 nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors); in scrub_free_parity()
2902 spin_lock(&sctx->stat_lock); in scrub_free_parity()
2903 sctx->stat.read_errors += nbits; in scrub_free_parity()
2904 sctx->stat.uncorrectable_errors += nbits; in scrub_free_parity()
2905 spin_unlock(&sctx->stat_lock); in scrub_free_parity()
2908 list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) { in scrub_free_parity()
2909 list_del_init(&curr->list); in scrub_free_parity()
2920 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_bio_endio_worker()
2922 btrfs_bio_counter_dec(sctx->fs_info); in scrub_parity_bio_endio_worker()
2929 struct scrub_parity *sparity = bio->bi_private; in scrub_parity_bio_endio()
2930 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; in scrub_parity_bio_endio()
2932 if (bio->bi_status) in scrub_parity_bio_endio()
2933 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, in scrub_parity_bio_endio()
2934 &sparity->dbitmap, sparity->nsectors); in scrub_parity_bio_endio()
2938 INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker); in scrub_parity_bio_endio()
2939 queue_work(fs_info->scrub_parity_workers, &sparity->work); in scrub_parity_bio_endio()
2944 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_check_and_repair()
2945 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_parity_check_and_repair()
2952 if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap, in scrub_parity_check_and_repair()
2953 &sparity->ebitmap, sparity->nsectors)) in scrub_parity_check_and_repair()
2956 length = sparity->logic_end - sparity->logic_start; in scrub_parity_check_and_repair()
2959 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, in scrub_parity_check_and_repair()
2961 if (ret || !bioc || !bioc->raid_map) in scrub_parity_check_and_repair()
2965 bio->bi_iter.bi_sector = sparity->logic_start >> 9; in scrub_parity_check_and_repair()
2966 bio->bi_private = sparity; in scrub_parity_check_and_repair()
2967 bio->bi_end_io = scrub_parity_bio_endio; in scrub_parity_check_and_repair()
2970 sparity->scrub_dev, in scrub_parity_check_and_repair()
2971 &sparity->dbitmap, in scrub_parity_check_and_repair()
2972 sparity->nsectors); in scrub_parity_check_and_repair()
2985 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap, in scrub_parity_check_and_repair()
2986 sparity->nsectors); in scrub_parity_check_and_repair()
2987 spin_lock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2988 sctx->stat.malloc_errors++; in scrub_parity_check_and_repair()
2989 spin_unlock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2996 refcount_inc(&sparity->refs); in scrub_parity_get()
3001 if (!refcount_dec_and_test(&sparity->refs)) in scrub_parity_put()
3015 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; in compare_extent_item_range()
3019 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in compare_extent_item_range()
3023 len = fs_info->nodesize; in compare_extent_item_range()
3028 return -1; in compare_extent_item_range()
3044 * return the extent item. This is for data extents crossing a stripe boundary.
3054 struct btrfs_fs_info *fs_info = extent_root->fs_info; in find_first_extent_item()
3059 if (path->nodes[0]) in find_first_extent_item()
3067 key.offset = (u64)-1; in find_first_extent_item()
3087 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in find_first_extent_item()
3100 path->slots[0]++; in find_first_extent_item()
3101 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { in find_first_extent_item()
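
/*
 * Editor's note: an illustrative userspace sketch (not kernel code) of what
 * find_first_extent_item() above does conceptually: return the first extent
 * whose byte range still overlaps the search window, which matters when a
 * data extent starts before the stripe but crosses into it.  The sorted
 * array stands in for the extent tree and all names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_extent {
	uint64_t start;
	uint64_t len;
};

/* @ext is sorted by start; return the first entry overlapping the window. */
static const struct sketch_extent *
find_first_overlap(const struct sketch_extent *ext, size_t nr,
		   uint64_t search_start, uint64_t search_len)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (ext[i].start >= search_start + search_len)
			break;				/* starts past the window */
		if (ext[i].start + ext[i].len > search_start)
			return &ext[i];			/* crosses into it        */
	}
	return NULL;
}
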
3120 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in get_extent_info()
3125 *size_ret = path->nodes[0]->fs_info->nodesize; in get_extent_info()
3128 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); in get_extent_info()
3129 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); in get_extent_info()
3130 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); in get_extent_info()
3149 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_data_stripe_for_parity()
3155 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); in scrub_raid56_data_stripe_for_parity()
3158 ASSERT(!path->nodes[0]); in scrub_raid56_data_stripe_for_parity()
3160 while (cur_logical < logical + map->stripe_len) { in scrub_raid56_data_stripe_for_parity()
3172 logical + map->stripe_len - cur_logical); in scrub_raid56_data_stripe_for_parity()
3173 /* No more extent items in this data stripe */ in scrub_raid56_data_stripe_for_parity()
3186 logical, map->stripe_len)) { in scrub_raid56_data_stripe_for_parity()
3190 spin_lock(&sctx->stat_lock); in scrub_raid56_data_stripe_for_parity()
3191 sctx->stat.uncorrectable_errors++; in scrub_raid56_data_stripe_for_parity()
3192 spin_unlock(&sctx->stat_lock); in scrub_raid56_data_stripe_for_parity()
3200 /* Truncate the range inside this data stripe */ in scrub_raid56_data_stripe_for_parity()
3202 logical + map->stripe_len) - cur_logical; in scrub_raid56_data_stripe_for_parity()
3212 ret = -EIO; in scrub_raid56_data_stripe_for_parity()
3219 extent_physical = bioc->stripes[0].physical; in scrub_raid56_data_stripe_for_parity()
3220 extent_mirror_num = bioc->mirror_num; in scrub_raid56_data_stripe_for_parity()
3221 extent_dev = bioc->stripes[0].dev; in scrub_raid56_data_stripe_for_parity()
3225 extent_start + extent_size - 1, in scrub_raid56_data_stripe_for_parity()
3226 &sctx->csum_list, 1, false); in scrub_raid56_data_stripe_for_parity()
3258 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity()
3267 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
3268 sctx->stat.malloc_errors++; in scrub_raid56_parity()
3269 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
3270 return -ENOMEM; in scrub_raid56_parity()
3272 path->search_commit_root = 1; in scrub_raid56_parity()
3273 path->skip_locking = 1; in scrub_raid56_parity()
3275 ASSERT(map->stripe_len <= U32_MAX); in scrub_raid56_parity()
3276 nsectors = map->stripe_len >> fs_info->sectorsize_bits; in scrub_raid56_parity()
3280 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
3281 sctx->stat.malloc_errors++; in scrub_raid56_parity()
3282 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
3284 return -ENOMEM; in scrub_raid56_parity()
3287 ASSERT(map->stripe_len <= U32_MAX); in scrub_raid56_parity()
3288 sparity->stripe_len = map->stripe_len; in scrub_raid56_parity()
3289 sparity->nsectors = nsectors; in scrub_raid56_parity()
3290 sparity->sctx = sctx; in scrub_raid56_parity()
3291 sparity->scrub_dev = sdev; in scrub_raid56_parity()
3292 sparity->logic_start = logic_start; in scrub_raid56_parity()
3293 sparity->logic_end = logic_end; in scrub_raid56_parity()
3294 refcount_set(&sparity->refs, 1); in scrub_raid56_parity()
3295 INIT_LIST_HEAD(&sparity->sectors_list); in scrub_raid56_parity()
3299 cur_logical += map->stripe_len) { in scrub_raid56_parity()
3308 mutex_lock(&sctx->wr_lock); in scrub_raid56_parity()
3310 mutex_unlock(&sctx->wr_lock); in scrub_raid56_parity()
3318 if (!btrfs_is_zoned(sctx->fs_info)) in sync_replace_for_zoned()
3321 sctx->flush_all_writes = true; in sync_replace_for_zoned()
3323 mutex_lock(&sctx->wr_lock); in sync_replace_for_zoned()
3325 mutex_unlock(&sctx->wr_lock); in sync_replace_for_zoned()
3327 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in sync_replace_for_zoned()
3333 struct btrfs_fs_info *fs_info = sctx->fs_info; in sync_write_pointer_for_zoned()
3339 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in sync_write_pointer_for_zoned()
3341 mutex_lock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
3342 if (sctx->write_pointer < physical_end) { in sync_write_pointer_for_zoned()
3343 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, in sync_write_pointer_for_zoned()
3345 sctx->write_pointer); in sync_write_pointer_for_zoned()
3350 mutex_unlock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
3351 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); in sync_write_pointer_for_zoned()
3357 * Scrub one range which can only have a simple mirror-based profile.
3373 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_simple_mirror()
3382 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_simple_mirror()
3395 if (atomic_read(&fs_info->scrub_cancel_req) || in scrub_simple_mirror()
3396 atomic_read(&sctx->cancel_req)) { in scrub_simple_mirror()
3397 ret = -ECANCELED; in scrub_simple_mirror()
3401 if (atomic_read(&fs_info->scrub_pause_req)) { in scrub_simple_mirror()
3403 sctx->flush_all_writes = true; in scrub_simple_mirror()
3405 mutex_lock(&sctx->wr_lock); in scrub_simple_mirror()
3407 mutex_unlock(&sctx->wr_lock); in scrub_simple_mirror()
3408 wait_event(sctx->list_wait, in scrub_simple_mirror()
3409 atomic_read(&sctx->bios_in_flight) == 0); in scrub_simple_mirror()
3410 sctx->flush_all_writes = false; in scrub_simple_mirror()
3414 spin_lock(&bg->lock); in scrub_simple_mirror()
3415 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { in scrub_simple_mirror()
3416 spin_unlock(&bg->lock); in scrub_simple_mirror()
3420 spin_unlock(&bg->lock); in scrub_simple_mirror()
3423 logical_end - cur_logical); in scrub_simple_mirror()
3426 sctx->stat.last_physical = physical + logical_length; in scrub_simple_mirror()
3439 * - Extent size limit in scrub_simple_mirror()
3440 * - Scrub range limit in scrub_simple_mirror()
3443 * - Max scrub size limit in scrub_simple_mirror()
3446 logical_end), cur_logical + max_length) - in scrub_simple_mirror()
3451 cur_logical + scrub_len - 1, in scrub_simple_mirror()
3452 &sctx->csum_list, 1, false); in scrub_simple_mirror()
3462 spin_lock(&sctx->stat_lock); in scrub_simple_mirror()
3463 sctx->stat.uncorrectable_errors++; in scrub_simple_mirror()
3464 spin_unlock(&sctx->stat_lock); in scrub_simple_mirror()
3469 cur_logical - logical_start + physical, in scrub_simple_mirror()
3475 if (sctx->is_dev_replace) in scrub_simple_mirror()
3488 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_full_stripe_len()
3491 return map->num_stripes / map->sub_stripes * map->stripe_len; in simple_stripe_full_stripe_len()
3499 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_get_logical()
3501 ASSERT(stripe_index < map->num_stripes); in simple_stripe_get_logical()
3504 * (stripe_index / sub_stripes) gives how many data stripes we need to in simple_stripe_get_logical()
3507 return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start; in simple_stripe_get_logical()
3510 /* Get the mirror number for the stripe */
3513 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_mirror_num()
3515 ASSERT(stripe_index < map->num_stripes); in simple_stripe_mirror_num()
3518 return stripe_index % map->sub_stripes + 1; in simple_stripe_mirror_num()
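
/*
 * Editor's note: a worked userspace example (not kernel code) of the
 * RAID0/RAID10 "simple stripe" arithmetic used by the three helpers above.
 * The values are a hypothetical RAID10 chunk with four device stripes,
 * sub_stripes = 2 and a 64 KiB stripe_len; bg_start is an arbitrary logical
 * address standing in for bg->start.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t stripe_len = 64 * 1024;
	const int num_stripes = 4;		/* device stripes in the chunk */
	const int sub_stripes = 2;		/* RAID10: two mirrored copies */
	const uint64_t bg_start = 1ULL << 30;	/* arbitrary chunk start       */
	const int stripe_index = 3;		/* device stripe to map        */

	/* simple_stripe_full_stripe_len(): bytes covered by one rotation. */
	assert(num_stripes / sub_stripes * stripe_len == 128 * 1024);

	/* simple_stripe_get_logical(): stripe 3 holds the second 64 KiB ... */
	assert((uint64_t)(stripe_index / sub_stripes) * stripe_len + bg_start ==
	       bg_start + 64 * 1024);

	/* simple_stripe_mirror_num(): ... and is the second copy of it. */
	assert(stripe_index % sub_stripes + 1 == 2);
	return 0;
}
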
3531 const u64 orig_physical = map->stripes[stripe_index].physical; in scrub_simple_stripe()
3537 while (cur_logical < bg->start + bg->length) { in scrub_simple_stripe()
3544 cur_logical, map->stripe_len, device, in scrub_simple_stripe()
3551 cur_physical += map->stripe_len; in scrub_simple_stripe()
3563 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
3567 struct map_lookup *map = em->map_lookup; in scrub_stripe()
3568 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; in scrub_stripe()
3569 const u64 chunk_logical = bg->start; in scrub_stripe()
3571 u64 physical = map->stripes[stripe_index].physical; in scrub_stripe()
3586 return -ENOMEM; in scrub_stripe()
3593 path->search_commit_root = 1; in scrub_stripe()
3594 path->skip_locking = 1; in scrub_stripe()
3595 path->reada = READA_FORWARD; in scrub_stripe()
3597 wait_event(sctx->list_wait, in scrub_stripe()
3598 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3601 root = btrfs_extent_root(fs_info, bg->start); in scrub_stripe()
3602 csum_root = btrfs_csum_root(fs_info, bg->start); in scrub_stripe()
3605 * collect all data csums for the stripe to avoid seeking during in scrub_stripe()
3610 if (sctx->is_dev_replace && in scrub_stripe()
3611 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { in scrub_stripe()
3612 mutex_lock(&sctx->wr_lock); in scrub_stripe()
3613 sctx->write_pointer = physical; in scrub_stripe()
3614 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
3615 sctx->flush_all_writes = true; in scrub_stripe()
3636 bg->start, bg->length, scrub_dev, in scrub_stripe()
3637 map->stripes[stripe_index].physical, in scrub_stripe()
3645 offset = map->stripe_len * (stripe_index / map->sub_stripes); in scrub_stripe()
3650 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); in scrub_stripe()
3660 increment = map->stripe_len * nr_data_stripes(map); in scrub_stripe()
3683 * Now we're at a data stripe, scrub each extent in the range. in scrub_stripe()
3685 * At this stage, if we ignore the repair part, inside each data in scrub_stripe()
3691 logical, map->stripe_len, in scrub_stripe()
3697 physical += map->stripe_len; in scrub_stripe()
3698 spin_lock(&sctx->stat_lock); in scrub_stripe()
3700 sctx->stat.last_physical = in scrub_stripe()
3701 map->stripes[stripe_index].physical + dev_stripe_len; in scrub_stripe()
3703 sctx->stat.last_physical = physical; in scrub_stripe()
3704 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3711 mutex_lock(&sctx->wr_lock); in scrub_stripe()
3713 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
3718 if (sctx->is_dev_replace && ret >= 0) { in scrub_stripe()
3723 map->stripes[stripe_index].physical, in scrub_stripe()
3738 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
3739 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in scrub_chunk()
3745 read_lock(&map_tree->lock); in scrub_chunk()
3746 em = lookup_extent_mapping(map_tree, bg->start, bg->length); in scrub_chunk()
3747 read_unlock(&map_tree->lock); in scrub_chunk()
3754 spin_lock(&bg->lock); in scrub_chunk()
3755 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) in scrub_chunk()
3756 ret = -EINVAL; in scrub_chunk()
3757 spin_unlock(&bg->lock); in scrub_chunk()
3761 if (em->start != bg->start) in scrub_chunk()
3763 if (em->len < dev_extent_len) in scrub_chunk()
3766 map = em->map_lookup; in scrub_chunk()
3767 for (i = 0; i < map->num_stripes; ++i) { in scrub_chunk()
3768 if (map->stripes[i].dev->bdev == scrub_dev->bdev && in scrub_chunk()
3769 map->stripes[i].physical == dev_offset) { in scrub_chunk()
3784 struct btrfs_fs_info *fs_info = cache->fs_info; in finish_extent_writes_for_zoned()
3792 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); in finish_extent_writes_for_zoned()
3806 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
3807 struct btrfs_root *root = fs_info->dev_root; in scrub_enumerate_chunks()
3816 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in scrub_enumerate_chunks()
3820 return -ENOMEM; in scrub_enumerate_chunks()
3822 path->reada = READA_FORWARD; in scrub_enumerate_chunks()
3823 path->search_commit_root = 1; in scrub_enumerate_chunks()
3824 path->skip_locking = 1; in scrub_enumerate_chunks()
3826 key.objectid = scrub_dev->devid; in scrub_enumerate_chunks()
3837 if (path->slots[0] >= in scrub_enumerate_chunks()
3838 btrfs_header_nritems(path->nodes[0])) { in scrub_enumerate_chunks()
3851 l = path->nodes[0]; in scrub_enumerate_chunks()
3852 slot = path->slots[0]; in scrub_enumerate_chunks()
3856 if (found_key.objectid != scrub_dev->devid) in scrub_enumerate_chunks()
3887 ASSERT(cache->start <= chunk_offset); in scrub_enumerate_chunks()
3907 if (cache->start < chunk_offset) { in scrub_enumerate_chunks()
3912 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { in scrub_enumerate_chunks()
3913 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { in scrub_enumerate_chunks()
3927 spin_lock(&cache->lock); in scrub_enumerate_chunks()
3928 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { in scrub_enumerate_chunks()
3929 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
3934 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
3940 * -> btrfs_wait_for_commit() in scrub_enumerate_chunks()
3941 * -> btrfs_commit_transaction() in scrub_enumerate_chunks()
3942 * -> btrfs_scrub_pause() in scrub_enumerate_chunks()
3950 * -EFBIG from btrfs_finish_chunk_alloc() like: in scrub_enumerate_chunks()
3966 * - Write duplication in scrub_enumerate_chunks()
3967 * Contains latest data in scrub_enumerate_chunks()
3968 * - Scrub copy in scrub_enumerate_chunks()
3969 * Contains data from commit tree in scrub_enumerate_chunks()
3972 * be overwritten by scrub copy, causing data corruption. in scrub_enumerate_chunks()
3973 * So for dev-replace, it's not allowed to continue if a block in scrub_enumerate_chunks()
3976 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
3977 if (!ret && sctx->is_dev_replace) { in scrub_enumerate_chunks()
3989 } else if (ret == -ENOSPC && !sctx->is_dev_replace) { in scrub_enumerate_chunks()
3991 * btrfs_inc_block_group_ro return -ENOSPC when it in scrub_enumerate_chunks()
3998 } else if (ret == -ETXTBSY) { in scrub_enumerate_chunks()
4001 cache->start); in scrub_enumerate_chunks()
4016 * finish before dev-replace. in scrub_enumerate_chunks()
4019 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
4021 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, in scrub_enumerate_chunks()
4022 cache->length); in scrub_enumerate_chunks()
4026 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
4027 dev_replace->cursor_right = found_key.offset + dev_extent_len; in scrub_enumerate_chunks()
4028 dev_replace->cursor_left = found_key.offset; in scrub_enumerate_chunks()
4029 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
4030 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
4045 sctx->flush_all_writes = true; in scrub_enumerate_chunks()
4047 mutex_lock(&sctx->wr_lock); in scrub_enumerate_chunks()
4049 mutex_unlock(&sctx->wr_lock); in scrub_enumerate_chunks()
4051 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
4052 atomic_read(&sctx->bios_in_flight) == 0); in scrub_enumerate_chunks()
4061 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
4062 atomic_read(&sctx->workers_pending) == 0); in scrub_enumerate_chunks()
4063 sctx->flush_all_writes = false; in scrub_enumerate_chunks()
4067 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
4068 !btrfs_finish_block_group_to_copy(dev_replace->srcdev, in scrub_enumerate_chunks()
4072 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
4073 dev_replace->cursor_left = dev_replace->cursor_right; in scrub_enumerate_chunks()
4074 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
4075 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
4087 spin_lock(&cache->lock); in scrub_enumerate_chunks()
4088 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && in scrub_enumerate_chunks()
4089 !cache->ro && cache->reserved == 0 && cache->used == 0) { in scrub_enumerate_chunks()
4090 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
4092 btrfs_discard_queue_work(&fs_info->discard_ctl, in scrub_enumerate_chunks()
4097 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
4104 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
4105 atomic64_read(&dev_replace->num_write_errors) > 0) { in scrub_enumerate_chunks()
4106 ret = -EIO; in scrub_enumerate_chunks()
4109 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
4110 ret = -ENOMEM; in scrub_enumerate_chunks()
4130 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
4133 return -EROFS; in scrub_supers()
4136 if (scrub_dev->fs_devices != fs_info->fs_devices) in scrub_supers()
4137 gen = scrub_dev->generation; in scrub_supers()
4139 gen = fs_info->last_trans_committed; in scrub_supers()
4144 scrub_dev->commit_total_bytes) in scrub_supers()
4155 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in scrub_supers()
4162 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, in scrub_workers_put()
4163 &fs_info->scrub_lock)) { in scrub_workers_put()
4164 struct workqueue_struct *scrub_workers = fs_info->scrub_workers; in scrub_workers_put()
4166 fs_info->scrub_wr_completion_workers; in scrub_workers_put()
4168 fs_info->scrub_parity_workers; in scrub_workers_put()
4170 fs_info->scrub_workers = NULL; in scrub_workers_put()
4171 fs_info->scrub_wr_completion_workers = NULL; in scrub_workers_put()
4172 fs_info->scrub_parity_workers = NULL; in scrub_workers_put()
4173 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_put()
4185 * Get a reference count on fs_info->scrub_workers. Start workers if necessary
4194 int max_active = fs_info->thread_pool_size; in scrub_workers_get()
4195 int ret = -ENOMEM; in scrub_workers_get()
4197 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) in scrub_workers_get()
4200 scrub_workers = alloc_workqueue("btrfs-scrub", flags, in scrub_workers_get()
4205 scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active); in scrub_workers_get()
4209 scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active); in scrub_workers_get()
4213 mutex_lock(&fs_info->scrub_lock); in scrub_workers_get()
4214 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { in scrub_workers_get()
4215 ASSERT(fs_info->scrub_workers == NULL && in scrub_workers_get()
4216 fs_info->scrub_wr_completion_workers == NULL && in scrub_workers_get()
4217 fs_info->scrub_parity_workers == NULL); in scrub_workers_get()
4218 fs_info->scrub_workers = scrub_workers; in scrub_workers_get()
4219 fs_info->scrub_wr_completion_workers = scrub_wr_comp; in scrub_workers_get()
4220 fs_info->scrub_parity_workers = scrub_parity; in scrub_workers_get()
4221 refcount_set(&fs_info->scrub_workers_refcnt, 1); in scrub_workers_get()
4222 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
4226 refcount_inc(&fs_info->scrub_workers_refcnt); in scrub_workers_get()
4227 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
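
/*
 * Editor's note: a userspace sketch (not kernel code) of the pattern that
 * scrub_workers_get() above follows: optimistically take a reference, and
 * only on the 0 -> 1 transition allocate the workers outside the lock, then
 * install them under the lock unless another caller raced and installed its
 * own.  Names (resource_get, struct shared) are hypothetical, and the three
 * btrfs scrub workqueues are collapsed into one opaque pointer.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct shared {
	pthread_mutex_t lock;
	atomic_uint refcnt;	/* number of users of @workers         */
	void *workers;		/* stands in for the scrub workqueues  */
};

static void *alloc_workers(void) { return malloc(1); }	/* placeholder */

static int resource_get(struct shared *s)
{
	unsigned int old = atomic_load(&s->refcnt);
	void *fresh;

	/* Fast path: the workers are already set up, just take a reference. */
	while (old != 0)
		if (atomic_compare_exchange_weak(&s->refcnt, &old, old + 1))
			return 0;

	fresh = alloc_workers();		/* allocate outside the lock    */
	if (!fresh)
		return -1;

	pthread_mutex_lock(&s->lock);
	if (atomic_load(&s->refcnt) == 0) {
		s->workers = fresh;		/* we won the race: install     */
		atomic_store(&s->refcnt, 1);
		fresh = NULL;
	} else {
		atomic_fetch_add(&s->refcnt, 1);/* lost the race: reuse theirs  */
	}
	pthread_mutex_unlock(&s->lock);
	free(fresh);				/* no-op when we installed ours */
	return 0;
}
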
4251 return -EAGAIN; in btrfs_scrub_dev()
4254 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); in btrfs_scrub_dev()
4261 ASSERT(fs_info->nodesize <= in btrfs_scrub_dev()
4262 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); in btrfs_scrub_dev()
4273 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4274 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_dev()
4275 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && in btrfs_scrub_dev()
4277 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4278 ret = -ENODEV; in btrfs_scrub_dev()
4283 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { in btrfs_scrub_dev()
4284 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4287 devid, rcu_str_deref(dev->name)); in btrfs_scrub_dev()
4288 ret = -EROFS; in btrfs_scrub_dev()
4292 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
4293 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || in btrfs_scrub_dev()
4294 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { in btrfs_scrub_dev()
4295 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
4296 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4297 ret = -EIO; in btrfs_scrub_dev()
4301 down_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
4302 if (dev->scrub_ctx || in btrfs_scrub_dev()
4304 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { in btrfs_scrub_dev()
4305 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
4306 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
4307 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4308 ret = -EINPROGRESS; in btrfs_scrub_dev()
4311 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
4313 sctx->readonly = readonly; in btrfs_scrub_dev()
4314 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
4315 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4322 atomic_inc(&fs_info->scrubs_running); in btrfs_scrub_dev()
4323 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
4332 * before incrementing fs_info->scrubs_running). in btrfs_scrub_dev()
4338 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
4339 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
4340 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
4347 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4349 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
4351 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
4357 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
4359 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
4366 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in btrfs_scrub_dev()
4367 atomic_dec(&fs_info->scrubs_running); in btrfs_scrub_dev()
4368 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_dev()
4370 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); in btrfs_scrub_dev()
4373 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
4379 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
4380 dev->scrub_ctx = NULL; in btrfs_scrub_dev()
4381 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
4393 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_scrub_dev()
4416 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4417 atomic_inc(&fs_info->scrub_pause_req); in btrfs_scrub_pause()
4418 while (atomic_read(&fs_info->scrubs_paused) != in btrfs_scrub_pause()
4419 atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_pause()
4420 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4421 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_pause()
4422 atomic_read(&fs_info->scrubs_paused) == in btrfs_scrub_pause()
4423 atomic_read(&fs_info->scrubs_running)); in btrfs_scrub_pause()
4424 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4426 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4431 atomic_dec(&fs_info->scrub_pause_req); in btrfs_scrub_continue()
4432 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_continue()
4437 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4438 if (!atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
4439 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4440 return -ENOTCONN; in btrfs_scrub_cancel()
4443 atomic_inc(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
4444 while (atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
4445 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4446 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel()
4447 atomic_read(&fs_info->scrubs_running) == 0); in btrfs_scrub_cancel()
4448 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4450 atomic_dec(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
4451 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4458 struct btrfs_fs_info *fs_info = dev->fs_info; in btrfs_scrub_cancel_dev()
4461 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4462 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
4464 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4465 return -ENOTCONN; in btrfs_scrub_cancel_dev()
4467 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
4468 while (dev->scrub_ctx) { in btrfs_scrub_cancel_dev()
4469 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4470 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel_dev()
4471 dev->scrub_ctx == NULL); in btrfs_scrub_cancel_dev()
4472 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4474 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4486 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
4487 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_progress()
4489 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
4491 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
4492 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
4494 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()
4511 !bioc->stripes[0].dev->bdev) { in scrub_find_good_copy()
4516 *extent_physical = bioc->stripes[0].physical; in scrub_find_good_copy()
4517 *extent_mirror_num = bioc->mirror_num; in scrub_find_good_copy()
4518 *extent_dev = bioc->stripes[0].dev; in scrub_find_good_copy()