Lines Matching refs:sctx
All hits below are in fs/btrfs/scrub.c; the trailing "member", "argument", and "local" tags mark the kind of sctx use at that line, and "in foo()" names the enclosing function.
82 struct scrub_ctx *sctx; member
113 struct scrub_ctx *sctx; member
130 struct scrub_ctx *sctx; member
255 static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx, in alloc_scrub_block() argument
267 sblock->sctx = sctx; in alloc_scrub_block()
329 sblock->len += sblock->sctx->fs_info->sectorsize; in alloc_scrub_sector()
405 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
417 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
419 static void scrub_wr_submit(struct scrub_ctx *sctx);
422 static void scrub_put_ctx(struct scrub_ctx *sctx);
430 static void scrub_pending_bio_inc(struct scrub_ctx *sctx) in scrub_pending_bio_inc() argument
432 refcount_inc(&sctx->refs); in scrub_pending_bio_inc()
433 atomic_inc(&sctx->bios_in_flight); in scrub_pending_bio_inc()
436 static void scrub_pending_bio_dec(struct scrub_ctx *sctx) in scrub_pending_bio_dec() argument
438 atomic_dec(&sctx->bios_in_flight); in scrub_pending_bio_dec()
439 wake_up(&sctx->list_wait); in scrub_pending_bio_dec()
440 scrub_put_ctx(sctx); in scrub_pending_bio_dec()
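Taken together, the hits at 430-440 give the lifetime rule for the context: every bio put in flight takes a reference on sctx and bumps bios_in_flight; completion reverses both and wakes anyone blocked on list_wait. A condensed reconstruction assembled from the matched lines (not pasted from the source):

	static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
	{
		refcount_inc(&sctx->refs);		/* the in-flight bio pins the context */
		atomic_inc(&sctx->bios_in_flight);
	}

	static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
	{
		atomic_dec(&sctx->bios_in_flight);
		wake_up(&sctx->list_wait);	/* unblock wait_event(sctx->list_wait, ...) callers */
		scrub_put_ctx(sctx);		/* may free sctx if this was the last reference */
	}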
692 static void scrub_free_csums(struct scrub_ctx *sctx) in scrub_free_csums() argument
694 while (!list_empty(&sctx->csum_list)) { in scrub_free_csums()
696 sum = list_first_entry(&sctx->csum_list, in scrub_free_csums()
703 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) in scrub_free_ctx() argument
707 if (!sctx) in scrub_free_ctx()
711 if (sctx->curr != -1) { in scrub_free_ctx()
712 struct scrub_bio *sbio = sctx->bios[sctx->curr]; in scrub_free_ctx()
720 struct scrub_bio *sbio = sctx->bios[i]; in scrub_free_ctx()
727 kfree(sctx->wr_curr_bio); in scrub_free_ctx()
728 scrub_free_csums(sctx); in scrub_free_ctx()
729 kfree(sctx); in scrub_free_ctx()
732 static void scrub_put_ctx(struct scrub_ctx *sctx) in scrub_put_ctx() argument
734 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
735 scrub_free_ctx(sctx); in scrub_put_ctx()
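The teardown hits at 692-735 show the matching release side: csum_list is drained entry by entry, and the context is freed only when the last reference taken by scrub_pending_bio_inc() is dropped. A sketch assembled from the matched lines; the unlink-and-free steps inside the loop are filled in from context and marked as such:

	static void scrub_free_csums(struct scrub_ctx *sctx)
	{
		while (!list_empty(&sctx->csum_list)) {
			struct btrfs_ordered_sum *sum;

			sum = list_first_entry(&sctx->csum_list,
					       struct btrfs_ordered_sum, list);
			list_del(&sum->list);	/* filled in: unlink the entry ... */
			kfree(sum);		/* ... and free it */
		}
	}

	static void scrub_put_ctx(struct scrub_ctx *sctx)
	{
		if (refcount_dec_and_test(&sctx->refs))
			scrub_free_ctx(sctx);	/* frees bios, csums, then sctx itself */
	}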
741 struct scrub_ctx *sctx; in scrub_setup_ctx() local
744 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); in scrub_setup_ctx()
745 if (!sctx) in scrub_setup_ctx()
747 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
748 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
749 sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO; in scrub_setup_ctx()
750 sctx->curr = -1; in scrub_setup_ctx()
751 sctx->fs_info = fs_info; in scrub_setup_ctx()
752 INIT_LIST_HEAD(&sctx->csum_list); in scrub_setup_ctx()
759 sctx->bios[i] = sbio; in scrub_setup_ctx()
762 sbio->sctx = sctx; in scrub_setup_ctx()
767 sctx->bios[i]->next_free = i + 1; in scrub_setup_ctx()
769 sctx->bios[i]->next_free = -1; in scrub_setup_ctx()
771 sctx->first_free = 0; in scrub_setup_ctx()
772 atomic_set(&sctx->bios_in_flight, 0); in scrub_setup_ctx()
773 atomic_set(&sctx->workers_pending, 0); in scrub_setup_ctx()
774 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
776 spin_lock_init(&sctx->list_lock); in scrub_setup_ctx()
777 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
778 init_waitqueue_head(&sctx->list_wait); in scrub_setup_ctx()
779 sctx->throttle_deadline = 0; in scrub_setup_ctx()
781 WARN_ON(sctx->wr_curr_bio != NULL); in scrub_setup_ctx()
782 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
783 sctx->wr_curr_bio = NULL; in scrub_setup_ctx()
786 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
787 sctx->flush_all_writes = false; in scrub_setup_ctx()
790 return sctx; in scrub_setup_ctx()
793 scrub_free_ctx(sctx); in scrub_setup_ctx()
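Within scrub_setup_ctx() (741-793), the preallocated scrub_bio slots are chained into a singly linked free list through their next_free indices: slot i points at i + 1, the last slot holds -1 as terminator, and first_free starts at slot 0. A condensed sketch of that loop; SCRUB_BIOS_PER_SCTX is the per-context bio count used by this file, and the error path unwinds through the scrub_free_ctx() call seen at 793:

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);

		if (!sbio)
			goto nomem;		/* unwind via scrub_free_ctx() */
		sctx->bios[i] = sbio;
		sbio->index = i;
		sbio->sctx = sctx;
		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;	/* chain to next slot */
		else
			sctx->bios[i]->next_free = -1;		/* end of free list */
	}
	sctx->first_free = 0;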
906 fs_info = sblock->sctx->fs_info; in scrub_print_warning()
989 struct scrub_ctx *sctx = sblock_to_check->sctx; in scrub_handle_errored_block() local
1009 fs_info = sctx->fs_info; in scrub_handle_errored_block()
1017 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1018 ++sctx->stat.super_errors; in scrub_handle_errored_block()
1019 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1030 if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical)) in scrub_handle_errored_block()
1053 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1055 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
1056 sctx->stat.read_errors++; in scrub_handle_errored_block()
1057 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1058 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1101 sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL, in scrub_handle_errored_block()
1104 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1105 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
1106 sctx->stat.read_errors++; in scrub_handle_errored_block()
1107 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1108 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1117 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1118 sctx->stat.read_errors++; in scrub_handle_errored_block()
1119 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1120 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1140 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1141 sctx->stat.unverified_errors++; in scrub_handle_errored_block()
1143 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1145 if (sctx->is_dev_replace) in scrub_handle_errored_block()
1151 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1152 sctx->stat.read_errors++; in scrub_handle_errored_block()
1153 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1158 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1159 sctx->stat.csum_errors++; in scrub_handle_errored_block()
1160 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1166 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1167 sctx->stat.verify_errors++; in scrub_handle_errored_block()
1168 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1180 if (sctx->readonly) { in scrub_handle_errored_block()
1181 ASSERT(!sctx->is_dev_replace); in scrub_handle_errored_block()
1234 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1246 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) in scrub_handle_errored_block()
1280 if (!sector_bad->io_error && !sctx->is_dev_replace) in scrub_handle_errored_block()
1308 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1335 if (success && !sctx->is_dev_replace) { in scrub_handle_errored_block()
1355 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1356 sctx->stat.corrected_errors++; in scrub_handle_errored_block()
1358 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1365 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1366 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1367 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1447 struct scrub_ctx *sctx = original_sblock->sctx; in scrub_setup_recheck_block() local
1448 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_setup_recheck_block()
1504 sblock->sctx = sctx; in scrub_setup_recheck_block()
1508 spin_lock(&sctx->stat_lock); in scrub_setup_recheck_block()
1509 sctx->stat.malloc_errors++; in scrub_setup_recheck_block()
1510 spin_unlock(&sctx->stat_lock); in scrub_setup_recheck_block()
1520 sctx->fs_info->csum_size); in scrub_setup_recheck_block()
1707 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; in scrub_repair_sector_from_good_copy()
1744 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_write_block_to_dev_replace()
1765 const u32 sectorsize = sblock->sctx->fs_info->sectorsize; in scrub_write_sector_to_dev_replace()
1771 return scrub_add_sector_to_wr_bio(sblock->sctx, sector); in scrub_write_sector_to_dev_replace()
1774 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) in fill_writer_pointer_gap() argument
1779 if (!btrfs_is_zoned(sctx->fs_info)) in fill_writer_pointer_gap()
1782 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) in fill_writer_pointer_gap()
1785 if (sctx->write_pointer < physical) { in fill_writer_pointer_gap()
1786 length = physical - sctx->write_pointer; in fill_writer_pointer_gap()
1788 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, in fill_writer_pointer_gap()
1789 sctx->write_pointer, length); in fill_writer_pointer_gap()
1791 sctx->write_pointer = physical; in fill_writer_pointer_gap()
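fill_writer_pointer_gap() (1774-1791) only acts on zoned targets, where sequential zones must be written strictly in order: any gap between the cached write pointer and the next physical address is zero-filled before the real write. Reassembled from the matched lines; the success check before advancing the pointer is inferred from context:

	static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
	{
		u64 length;
		int ret = 0;

		if (!btrfs_is_zoned(sctx->fs_info))
			return 0;
		if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
			return 0;

		if (sctx->write_pointer < physical) {
			length = physical - sctx->write_pointer;
			ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
							sctx->write_pointer, length);
			if (!ret)	/* inferred: only advance on successful zeroout */
				sctx->write_pointer = physical;
		}
		return ret;
	}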
1801 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx, in scrub_add_sector_to_wr_bio() argument
1807 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_add_sector_to_wr_bio()
1809 mutex_lock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1811 if (!sctx->wr_curr_bio) { in scrub_add_sector_to_wr_bio()
1812 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), in scrub_add_sector_to_wr_bio()
1814 if (!sctx->wr_curr_bio) { in scrub_add_sector_to_wr_bio()
1815 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1818 sctx->wr_curr_bio->sctx = sctx; in scrub_add_sector_to_wr_bio()
1819 sctx->wr_curr_bio->sector_count = 0; in scrub_add_sector_to_wr_bio()
1821 sbio = sctx->wr_curr_bio; in scrub_add_sector_to_wr_bio()
1823 ret = fill_writer_pointer_gap(sctx, sector->offset + in scrub_add_sector_to_wr_bio()
1826 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1832 sbio->dev = sctx->wr_tgtdev; in scrub_add_sector_to_wr_bio()
1834 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio, in scrub_add_sector_to_wr_bio()
1845 scrub_wr_submit(sctx); in scrub_add_sector_to_wr_bio()
1854 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
1857 scrub_wr_submit(sctx); in scrub_add_sector_to_wr_bio()
1871 if (sbio->sector_count == sctx->sectors_per_bio) in scrub_add_sector_to_wr_bio()
1872 scrub_wr_submit(sctx); in scrub_add_sector_to_wr_bio()
1873 mutex_unlock(&sctx->wr_lock); in scrub_add_sector_to_wr_bio()
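scrub_add_sector_to_wr_bio() (1801-1873) batches write-out: under wr_lock it lazily allocates wr_curr_bio, appends sectors while they stay contiguous with the bio, and flushes early through scrub_wr_submit() on a discontinuity or when sector_count reaches sectors_per_bio. A skeleton of the flow with the append and discontinuity handling elided (the elided parts are only summarized in comments):

	mutex_lock(&sctx->wr_lock);
	if (!sctx->wr_curr_bio) {	/* first sector: create the write bio lazily */
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), GFP_KERNEL);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->sector_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	/* ... fill writer-pointer gap, append the sector; if it is not
	 * contiguous with sbio, scrub_wr_submit() the old bio and retry ... */
	if (sbio->sector_count == sctx->sectors_per_bio)
		scrub_wr_submit(sctx);	/* bio is full, send it to the target */
	mutex_unlock(&sctx->wr_lock);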
1878 static void scrub_wr_submit(struct scrub_ctx *sctx) in scrub_wr_submit() argument
1882 if (!sctx->wr_curr_bio) in scrub_wr_submit()
1885 sbio = sctx->wr_curr_bio; in scrub_wr_submit()
1886 sctx->wr_curr_bio = NULL; in scrub_wr_submit()
1887 scrub_pending_bio_inc(sctx); in scrub_wr_submit()
1895 if (btrfs_is_zoned(sctx->fs_info)) in scrub_wr_submit()
1896 sctx->write_pointer = sbio->physical + sbio->sector_count * in scrub_wr_submit()
1897 sctx->fs_info->sectorsize; in scrub_wr_submit()
1915 struct scrub_ctx *sctx = sbio->sctx; in scrub_wr_bio_end_io_worker() local
1921 &sbio->sctx->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1942 scrub_pending_bio_dec(sctx); in scrub_wr_bio_end_io_worker()
1981 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_data() local
1982 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_data()
2007 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_tree_block() local
2009 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_tree_block()
2018 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_checksum_tree_block()
2032 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size); in scrub_checksum_tree_block()
2065 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) in scrub_checksum_tree_block()
2074 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_super() local
2075 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_super()
2102 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size)) in scrub_checksum_super()
2143 static void scrub_throttle(struct scrub_ctx *sctx) in scrub_throttle() argument
2153 sbio = sctx->bios[sctx->curr]; in scrub_throttle()
2168 if (sctx->throttle_deadline == 0) { in scrub_throttle()
2169 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); in scrub_throttle()
2170 sctx->throttle_sent = 0; in scrub_throttle()
2174 if (ktime_before(now, sctx->throttle_deadline)) { in scrub_throttle()
2176 sctx->throttle_sent += sbio->bio->bi_iter.bi_size; in scrub_throttle()
2177 if (sctx->throttle_sent <= div_u64(bwlimit, div)) in scrub_throttle()
2181 delta = ktime_ms_delta(sctx->throttle_deadline, now); in scrub_throttle()
2195 sctx->throttle_deadline = 0; in scrub_throttle()
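scrub_throttle() (2143-2195) rate-limits in time slices: each slice gets a deadline plus a byte budget (bwlimit split into div intervals), bios under budget pass straight through, and the first bio over budget sleeps out the rest of the slice. A condensed sketch assuming bwlimit is the device's configured scrub speed limit and that the locals (now, delta, div, bwlimit, time_slice, sbio) are set up as the hits suggest:

	now = ktime_get();
	if (sctx->throttle_deadline == 0) {	/* open a new slice */
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	if (ktime_before(now, sctx->throttle_deadline)) {
		sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;			/* under budget: submit now */
		/* over budget: sleep until the slice ends */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		delta = 0;			/* deadline already passed */
	}
	if (delta)
		schedule_timeout_interruptible(div_u64(delta * HZ, 1000));
	sctx->throttle_deadline = 0;	/* next call opens a fresh slice */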
2198 static void scrub_submit(struct scrub_ctx *sctx) in scrub_submit() argument
2202 if (sctx->curr == -1) in scrub_submit()
2205 scrub_throttle(sctx); in scrub_submit()
2207 sbio = sctx->bios[sctx->curr]; in scrub_submit()
2208 sctx->curr = -1; in scrub_submit()
2209 scrub_pending_bio_inc(sctx); in scrub_submit()
2214 static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx, in scrub_add_sector_to_rd_bio() argument
2219 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_add_sector_to_rd_bio()
2226 while (sctx->curr == -1) { in scrub_add_sector_to_rd_bio()
2227 spin_lock(&sctx->list_lock); in scrub_add_sector_to_rd_bio()
2228 sctx->curr = sctx->first_free; in scrub_add_sector_to_rd_bio()
2229 if (sctx->curr != -1) { in scrub_add_sector_to_rd_bio()
2230 sctx->first_free = sctx->bios[sctx->curr]->next_free; in scrub_add_sector_to_rd_bio()
2231 sctx->bios[sctx->curr]->next_free = -1; in scrub_add_sector_to_rd_bio()
2232 sctx->bios[sctx->curr]->sector_count = 0; in scrub_add_sector_to_rd_bio()
2233 spin_unlock(&sctx->list_lock); in scrub_add_sector_to_rd_bio()
2235 spin_unlock(&sctx->list_lock); in scrub_add_sector_to_rd_bio()
2236 wait_event(sctx->list_wait, sctx->first_free != -1); in scrub_add_sector_to_rd_bio()
2239 sbio = sctx->bios[sctx->curr]; in scrub_add_sector_to_rd_bio()
2245 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio, in scrub_add_sector_to_rd_bio()
2257 scrub_submit(sctx); in scrub_add_sector_to_rd_bio()
2269 scrub_submit(sctx); in scrub_add_sector_to_rd_bio()
2276 if (sbio->sector_count == sctx->sectors_per_bio) in scrub_add_sector_to_rd_bio()
2277 scrub_submit(sctx); in scrub_add_sector_to_rd_bio()
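The read path claims a scrub_bio from the free list built in scrub_setup_ctx(): under list_lock, first_free moves into curr; if no slot is free, the caller blocks on list_wait until scrub_bio_end_io_worker() pushes a slot back (see 2517-2520). The claim loop, assembled from the matched lines plus the obvious else branch:

	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {		/* claimed a free slot */
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->sector_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {			/* all bios in flight: wait for one */
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}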
2285 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_missing_raid56_end_io()
2299 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_worker() local
2300 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_worker()
2311 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2312 sctx->stat.read_errors++; in scrub_missing_raid56_worker()
2313 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2318 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2319 sctx->stat.uncorrectable_errors++; in scrub_missing_raid56_worker()
2320 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2328 if (sctx->is_dev_replace && sctx->flush_all_writes) { in scrub_missing_raid56_worker()
2329 mutex_lock(&sctx->wr_lock); in scrub_missing_raid56_worker()
2330 scrub_wr_submit(sctx); in scrub_missing_raid56_worker()
2331 mutex_unlock(&sctx->wr_lock); in scrub_missing_raid56_worker()
2335 scrub_pending_bio_dec(sctx); in scrub_missing_raid56_worker()
2340 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_pages() local
2341 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_pages()
2356 if (WARN_ON(!sctx->is_dev_replace || in scrub_missing_raid56_pages()
2386 scrub_pending_bio_inc(sctx); in scrub_missing_raid56_pages()
2396 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2397 sctx->stat.malloc_errors++; in scrub_missing_raid56_pages()
2398 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2401 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len, in scrub_sectors() argument
2407 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_sectors()
2410 sblock = alloc_scrub_block(sctx, dev, logical, physical, in scrub_sectors()
2413 spin_lock(&sctx->stat_lock); in scrub_sectors()
2414 sctx->stat.malloc_errors++; in scrub_sectors()
2415 spin_unlock(&sctx->stat_lock); in scrub_sectors()
2430 spin_lock(&sctx->stat_lock); in scrub_sectors()
2431 sctx->stat.malloc_errors++; in scrub_sectors()
2432 spin_unlock(&sctx->stat_lock); in scrub_sectors()
2440 memcpy(sector->csum, csum, sctx->fs_info->csum_size); in scrub_sectors()
2462 ret = scrub_add_sector_to_rd_bio(sctx, sector); in scrub_sectors()
2470 scrub_submit(sctx); in scrub_sectors()
2492 struct scrub_ctx *sctx = sbio->sctx; in scrub_bio_end_io_worker() local
2517 spin_lock(&sctx->list_lock); in scrub_bio_end_io_worker()
2518 sbio->next_free = sctx->first_free; in scrub_bio_end_io_worker()
2519 sctx->first_free = sbio->index; in scrub_bio_end_io_worker()
2520 spin_unlock(&sctx->list_lock); in scrub_bio_end_io_worker()
2522 if (sctx->is_dev_replace && sctx->flush_all_writes) { in scrub_bio_end_io_worker()
2523 mutex_lock(&sctx->wr_lock); in scrub_bio_end_io_worker()
2524 scrub_wr_submit(sctx); in scrub_bio_end_io_worker()
2525 mutex_unlock(&sctx->wr_lock); in scrub_bio_end_io_worker()
2528 scrub_pending_bio_dec(sctx); in scrub_bio_end_io_worker()
2537 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits; in __scrub_mark_bitmap()
2584 if (!corrupted && sblock->sctx->is_dev_replace) in scrub_block_complete()
2592 sblock->sctx->fs_info->sectorsize; in scrub_block_complete()
2600 static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum) in drop_csum_range() argument
2602 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits; in drop_csum_range()
2618 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) in scrub_find_csum() argument
2622 while (!list_empty(&sctx->csum_list)) { in scrub_find_csum()
2627 sum = list_first_entry(&sctx->csum_list, in scrub_find_csum()
2640 drop_csum_range(sctx, sum); in scrub_find_csum()
2646 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits; in scrub_find_csum()
2647 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits; in scrub_find_csum()
2649 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size, in scrub_find_csum()
2650 sctx->fs_info->csum_size); in scrub_find_csum()
2654 drop_csum_range(sctx, sum); in scrub_find_csum()
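scrub_find_csum() (2618-2654) leans on scrub running in bytenr order: ranges that end at or before the current logical address can be dropped for good via drop_csum_range() (which also counts csum_discards, 2600-2602), and a covering range yields the per-sector csum at the computed index. A condensed reconstruction; the two range comparisons are inferred from the drop/copy hits, and the locals (sum, index, num_sectors, found) are assumed declared above the loop:

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)	/* range starts past us: no csum */
			break;
		if (sum->bytenr + sum->len <= logical) {
			drop_csum_range(sctx, sum);	/* wholly behind us, never needed again */
			continue;
		}
		/* this range covers 'logical': copy the matching sector csum */
		index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
		num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
		memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
		       sctx->fs_info->csum_size);
		if (index == num_sectors - 1)	/* last sector consumed the range */
			drop_csum_range(sctx, sum);
		found = true;
		break;
	}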
2663 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, in scrub_extent() argument
2679 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2680 spin_lock(&sctx->stat_lock); in scrub_extent()
2681 sctx->stat.data_extents_scrubbed++; in scrub_extent()
2682 sctx->stat.data_bytes_scrubbed += len; in scrub_extent()
2683 spin_unlock(&sctx->stat_lock); in scrub_extent()
2688 blocksize = sctx->fs_info->nodesize; in scrub_extent()
2689 spin_lock(&sctx->stat_lock); in scrub_extent()
2690 sctx->stat.tree_extents_scrubbed++; in scrub_extent()
2691 sctx->stat.tree_bytes_scrubbed += len; in scrub_extent()
2692 spin_unlock(&sctx->stat_lock); in scrub_extent()
2694 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2707 if (sctx->is_dev_replace && !dev->bdev) in scrub_extent()
2708 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical, in scrub_extent()
2716 have_csum = scrub_find_csum(sctx, logical, csum); in scrub_extent()
2718 ++sctx->stat.no_csum; in scrub_extent()
2720 ret = scrub_sectors(sctx, logical, l, src_physical, src_dev, in scrub_extent()
2738 struct scrub_ctx *sctx = sparity->sctx; in scrub_sectors_for_parity() local
2740 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_sectors_for_parity()
2745 sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num); in scrub_sectors_for_parity()
2747 spin_lock(&sctx->stat_lock); in scrub_sectors_for_parity()
2748 sctx->stat.malloc_errors++; in scrub_sectors_for_parity()
2749 spin_unlock(&sctx->stat_lock); in scrub_sectors_for_parity()
2761 spin_lock(&sctx->stat_lock); in scrub_sectors_for_parity()
2762 sctx->stat.malloc_errors++; in scrub_sectors_for_parity()
2763 spin_unlock(&sctx->stat_lock); in scrub_sectors_for_parity()
2775 memcpy(sector->csum, csum, sctx->fs_info->csum_size); in scrub_sectors_for_parity()
2791 ret = scrub_add_sector_to_rd_bio(sctx, sector); in scrub_sectors_for_parity()
2808 struct scrub_ctx *sctx = sparity->sctx; in scrub_extent_for_parity() local
2823 blocksize = sctx->fs_info->sectorsize; in scrub_extent_for_parity()
2833 have_csum = scrub_find_csum(sctx, logical, csum); in scrub_extent_for_parity()
2896 struct scrub_ctx *sctx = sparity->sctx; in scrub_free_parity() local
2902 spin_lock(&sctx->stat_lock); in scrub_free_parity()
2903 sctx->stat.read_errors += nbits; in scrub_free_parity()
2904 sctx->stat.uncorrectable_errors += nbits; in scrub_free_parity()
2905 spin_unlock(&sctx->stat_lock); in scrub_free_parity()
2920 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_bio_endio_worker() local
2922 btrfs_bio_counter_dec(sctx->fs_info); in scrub_parity_bio_endio_worker()
2924 scrub_pending_bio_dec(sctx); in scrub_parity_bio_endio_worker()
2930 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; in scrub_parity_bio_endio()
2944 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_check_and_repair() local
2945 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_parity_check_and_repair()
2977 scrub_pending_bio_inc(sctx); in scrub_parity_check_and_repair()
2987 spin_lock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2988 sctx->stat.malloc_errors++; in scrub_parity_check_and_repair()
2989 spin_unlock(&sctx->stat_lock); in scrub_parity_check_and_repair()
3142 static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx, in scrub_raid56_data_stripe_for_parity() argument
3149 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_data_stripe_for_parity()
3190 spin_lock(&sctx->stat_lock); in scrub_raid56_data_stripe_for_parity()
3191 sctx->stat.uncorrectable_errors++; in scrub_raid56_data_stripe_for_parity()
3192 spin_unlock(&sctx->stat_lock); in scrub_raid56_data_stripe_for_parity()
3226 &sctx->csum_list, 1, false); in scrub_raid56_data_stripe_for_parity()
3237 scrub_free_csums(sctx); in scrub_raid56_data_stripe_for_parity()
3252 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, in scrub_raid56_parity() argument
3258 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity()
3267 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
3268 sctx->stat.malloc_errors++; in scrub_raid56_parity()
3269 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
3280 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
3281 sctx->stat.malloc_errors++; in scrub_raid56_parity()
3282 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
3290 sparity->sctx = sctx; in scrub_raid56_parity()
3300 ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map, in scrub_raid56_parity()
3307 scrub_submit(sctx); in scrub_raid56_parity()
3308 mutex_lock(&sctx->wr_lock); in scrub_raid56_parity()
3309 scrub_wr_submit(sctx); in scrub_raid56_parity()
3310 mutex_unlock(&sctx->wr_lock); in scrub_raid56_parity()
3316 static void sync_replace_for_zoned(struct scrub_ctx *sctx) in sync_replace_for_zoned() argument
3318 if (!btrfs_is_zoned(sctx->fs_info)) in sync_replace_for_zoned()
3321 sctx->flush_all_writes = true; in sync_replace_for_zoned()
3322 scrub_submit(sctx); in sync_replace_for_zoned()
3323 mutex_lock(&sctx->wr_lock); in sync_replace_for_zoned()
3324 scrub_wr_submit(sctx); in sync_replace_for_zoned()
3325 mutex_unlock(&sctx->wr_lock); in sync_replace_for_zoned()
3327 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in sync_replace_for_zoned()
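sync_replace_for_zoned() (3316-3327) is the flush barrier the zoned dev-replace path needs: force out the pending read bio and the pending write bio, then wait until nothing is in flight so the cached write pointer is exact before the next stripe. Assembled directly from the matched lines:

	static void sync_replace_for_zoned(struct scrub_ctx *sctx)
	{
		if (!btrfs_is_zoned(sctx->fs_info))
			return;

		sctx->flush_all_writes = true;
		scrub_submit(sctx);		/* flush the pending read bio */
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);		/* flush the pending write bio */
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	}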
3330 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, in sync_write_pointer_for_zoned() argument
3333 struct btrfs_fs_info *fs_info = sctx->fs_info; in sync_write_pointer_for_zoned()
3339 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in sync_write_pointer_for_zoned()
3341 mutex_lock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
3342 if (sctx->write_pointer < physical_end) { in sync_write_pointer_for_zoned()
3343 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, in sync_write_pointer_for_zoned()
3345 sctx->write_pointer); in sync_write_pointer_for_zoned()
3350 mutex_unlock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
3351 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); in sync_write_pointer_for_zoned()
3364 static int scrub_simple_mirror(struct scrub_ctx *sctx, in scrub_simple_mirror() argument
3373 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_simple_mirror()
3396 atomic_read(&sctx->cancel_req)) { in scrub_simple_mirror()
3403 sctx->flush_all_writes = true; in scrub_simple_mirror()
3404 scrub_submit(sctx); in scrub_simple_mirror()
3405 mutex_lock(&sctx->wr_lock); in scrub_simple_mirror()
3406 scrub_wr_submit(sctx); in scrub_simple_mirror()
3407 mutex_unlock(&sctx->wr_lock); in scrub_simple_mirror()
3408 wait_event(sctx->list_wait, in scrub_simple_mirror()
3409 atomic_read(&sctx->bios_in_flight) == 0); in scrub_simple_mirror()
3410 sctx->flush_all_writes = false; in scrub_simple_mirror()
3426 sctx->stat.last_physical = physical + logical_length; in scrub_simple_mirror()
3452 &sctx->csum_list, 1, false); in scrub_simple_mirror()
3462 spin_lock(&sctx->stat_lock); in scrub_simple_mirror()
3463 sctx->stat.uncorrectable_errors++; in scrub_simple_mirror()
3464 spin_unlock(&sctx->stat_lock); in scrub_simple_mirror()
3468 ret = scrub_extent(sctx, map, cur_logical, scrub_len, in scrub_simple_mirror()
3472 scrub_free_csums(sctx); in scrub_simple_mirror()
3475 if (sctx->is_dev_replace) in scrub_simple_mirror()
3476 sync_replace_for_zoned(sctx); in scrub_simple_mirror()
3521 static int scrub_simple_stripe(struct scrub_ctx *sctx, in scrub_simple_stripe() argument
3543 ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map, in scrub_simple_stripe()
3556 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, in scrub_stripe() argument
3563 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
3597 wait_event(sctx->list_wait, in scrub_stripe()
3598 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3610 if (sctx->is_dev_replace && in scrub_stripe()
3611 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { in scrub_stripe()
3612 mutex_lock(&sctx->wr_lock); in scrub_stripe()
3613 sctx->write_pointer = physical; in scrub_stripe()
3614 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
3615 sctx->flush_all_writes = true; in scrub_stripe()
3635 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map, in scrub_stripe()
3643 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map, in scrub_stripe()
3674 ret = scrub_raid56_parity(sctx, map, scrub_dev, in scrub_stripe()
3690 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map, in scrub_stripe()
3698 spin_lock(&sctx->stat_lock); in scrub_stripe()
3700 sctx->stat.last_physical = in scrub_stripe()
3703 sctx->stat.last_physical = physical; in scrub_stripe()
3704 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3710 scrub_submit(sctx); in scrub_stripe()
3711 mutex_lock(&sctx->wr_lock); in scrub_stripe()
3712 scrub_wr_submit(sctx); in scrub_stripe()
3713 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
3718 if (sctx->is_dev_replace && ret >= 0) { in scrub_stripe()
3721 ret2 = sync_write_pointer_for_zoned(sctx, in scrub_stripe()
3732 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, in scrub_chunk() argument
3738 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
3770 ret = scrub_stripe(sctx, bg, em, scrub_dev, i); in scrub_chunk()
3801 int scrub_enumerate_chunks(struct scrub_ctx *sctx, in scrub_enumerate_chunks() argument
3806 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
3912 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { in scrub_enumerate_chunks()
3976 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
3977 if (!ret && sctx->is_dev_replace) { in scrub_enumerate_chunks()
3989 } else if (ret == -ENOSPC && !sctx->is_dev_replace) { in scrub_enumerate_chunks()
4019 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
4032 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset, in scrub_enumerate_chunks()
4045 sctx->flush_all_writes = true; in scrub_enumerate_chunks()
4046 scrub_submit(sctx); in scrub_enumerate_chunks()
4047 mutex_lock(&sctx->wr_lock); in scrub_enumerate_chunks()
4048 scrub_wr_submit(sctx); in scrub_enumerate_chunks()
4049 mutex_unlock(&sctx->wr_lock); in scrub_enumerate_chunks()
4051 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
4052 atomic_read(&sctx->bios_in_flight) == 0); in scrub_enumerate_chunks()
4061 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
4062 atomic_read(&sctx->workers_pending) == 0); in scrub_enumerate_chunks()
4063 sctx->flush_all_writes = false; in scrub_enumerate_chunks()
4067 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
4104 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
4109 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
4123 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, in scrub_supers() argument
4130 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
4149 ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, in scrub_supers()
4155 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in scrub_supers()
4244 struct scrub_ctx *sctx; in btrfs_scrub_dev() local
4265 sctx = scrub_setup_ctx(fs_info, is_dev_replace); in btrfs_scrub_dev()
4266 if (IS_ERR(sctx)) in btrfs_scrub_dev()
4267 return PTR_ERR(sctx); in btrfs_scrub_dev()
4313 sctx->readonly = readonly; in btrfs_scrub_dev()
4314 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
4338 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
4339 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
4340 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
4348 ret = scrub_supers(sctx, dev); in btrfs_scrub_dev()
4351 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
4357 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
4359 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
4363 ret = scrub_enumerate_chunks(sctx, dev, start, end); in btrfs_scrub_dev()
4366 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in btrfs_scrub_dev()
4370 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); in btrfs_scrub_dev()
4373 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
4384 scrub_put_ctx(sctx); in btrfs_scrub_dev()
4409 scrub_free_ctx(sctx); in btrfs_scrub_dev()
4459 struct scrub_ctx *sctx; in btrfs_scrub_cancel_dev() local
4462 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
4463 if (!sctx) { in btrfs_scrub_cancel_dev()
4467 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
4484 struct scrub_ctx *sctx = NULL; in btrfs_scrub_progress() local
4489 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
4490 if (sctx) in btrfs_scrub_progress()
4491 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
4494 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()