Lines Matching +full:use +full:-parity in fs/btrfs/raid56.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
18 #include "disk-io.h"
21 #include "async-thread.h"
82 INIT_WORK(&rbio->work, work_func); in start_async_work()
83 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); in start_async_work()
99 if (info->stripe_hash_table) in btrfs_alloc_stripe_hash_table()
111 return -ENOMEM; in btrfs_alloc_stripe_hash_table()
113 spin_lock_init(&table->cache_lock); in btrfs_alloc_stripe_hash_table()
114 INIT_LIST_HEAD(&table->stripe_cache); in btrfs_alloc_stripe_hash_table()
116 h = table->table; in btrfs_alloc_stripe_hash_table()
120 INIT_LIST_HEAD(&cur->hash_list); in btrfs_alloc_stripe_hash_table()
121 spin_lock_init(&cur->lock); in btrfs_alloc_stripe_hash_table()
124 x = cmpxchg(&info->stripe_hash_table, NULL, table); in btrfs_alloc_stripe_hash_table()
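The cmpxchg() above is the usual publish-once pattern: every caller builds its own hash table, and whoever loses the race to install it frees its copy and uses the winner's. A minimal userspace sketch of that pattern, with made-up names (install_table, struct table) and C11 atomics standing in for the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdlib.h>

struct table { int nbuckets; };

static _Atomic(struct table *) global_table;

static struct table *install_table(void)
{
        struct table *t = calloc(1, sizeof(*t));
        struct table *expected = NULL;

        if (!t)
                return NULL;
        t->nbuckets = 256;

        /* Try to publish our table; fails if someone else already did. */
        if (!atomic_compare_exchange_strong(&global_table, &expected, t)) {
                free(t);            /* lost the race, drop our copy */
                t = expected;       /* use the table that won */
        }
        return t;
}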
132 * use the page uptodate bit in the stripe cache array
147 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
149 if (!rbio->bio_sectors[i].page) in cache_rbio_pages()
152 ASSERT(rbio->stripe_sectors[i].page); in cache_rbio_pages()
153 memcpy_page(rbio->stripe_sectors[i].page, in cache_rbio_pages()
154 rbio->stripe_sectors[i].pgoff, in cache_rbio_pages()
155 rbio->bio_sectors[i].page, in cache_rbio_pages()
156 rbio->bio_sectors[i].pgoff, in cache_rbio_pages()
157 rbio->bioc->fs_info->sectorsize); in cache_rbio_pages()
158 rbio->stripe_sectors[i].uptodate = 1; in cache_rbio_pages()
160 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
168 u64 num = rbio->bioc->raid_map[0]; in rbio_bucket()
184 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in full_page_sectors_uptodate()
188 ASSERT(page_nr < rbio->nr_pages); in full_page_sectors_uptodate()
193 if (!rbio->stripe_sectors[i].uptodate) in full_page_sectors_uptodate()
200 * Update the stripe_sectors[] array to use correct page and pgoff
206 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_stripe_sectors()
210 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
213 ASSERT(page_index < rbio->nr_pages); in index_stripe_sectors()
214 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index]; in index_stripe_sectors()
215 rbio->stripe_sectors[i].pgoff = offset_in_page(offset); in index_stripe_sectors()
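index_stripe_sectors() keeps stripe_sectors[i] pointing at the right page and in-page offset for sub-page sectorsizes. A standalone sketch of that mapping, assuming a 4K page size and a sectorsize that divides it; the names here are illustrative only:

#include <assert.h>

#define EX_PAGE_SIZE 4096u

struct sector_loc {
        unsigned int page_index;   /* which stripe page holds the sector */
        unsigned int pgoff;        /* byte offset inside that page */
};

static struct sector_loc locate_sector(unsigned int sector_nr,
                                       unsigned int sectorsize)
{
        unsigned int offset = sector_nr * sectorsize;

        assert(EX_PAGE_SIZE % sectorsize == 0);
        return (struct sector_loc) {
                .page_index = offset / EX_PAGE_SIZE,
                .pgoff = offset % EX_PAGE_SIZE,   /* offset_in_page() equivalent */
        };
}

With 1K sectors, for example, sector 5 lands in page 1 at offset 1024.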
222 const u32 sectorsize = src->bioc->fs_info->sectorsize; in steal_rbio_page()
226 if (dest->stripe_pages[page_nr]) in steal_rbio_page()
227 __free_page(dest->stripe_pages[page_nr]); in steal_rbio_page()
228 dest->stripe_pages[page_nr] = src->stripe_pages[page_nr]; in steal_rbio_page()
229 src->stripe_pages[page_nr] = NULL; in steal_rbio_page()
231 /* Also update the sector->uptodate bits. */ in steal_rbio_page()
234 dest->stripe_sectors[i].uptodate = true; in steal_rbio_page()
249 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags)) in steal_rbio()
252 for (i = 0; i < dest->nr_pages; i++) { in steal_rbio()
253 s = src->stripe_pages[i]; in steal_rbio()
268 * must be called with dest->rbio_list_lock held
273 bio_list_merge(&dest->bio_list, &victim->bio_list); in merge_rbio()
274 dest->bio_list_bytes += victim->bio_list_bytes; in merge_rbio()
276 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap, in merge_rbio()
277 dest->stripe_nsectors); in merge_rbio()
278 bio_list_init(&victim->bio_list); in merge_rbio()
295 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
298 table = rbio->bioc->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
299 h = table->table + bucket; in __remove_rbio_from_cache()
304 spin_lock(&h->lock); in __remove_rbio_from_cache()
310 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
312 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
313 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
314 table->cache_size -= 1; in __remove_rbio_from_cache()
326 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
327 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
328 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
329 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
330 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
335 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
336 spin_unlock(&h->lock); in __remove_rbio_from_cache()
350 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
353 table = rbio->bioc->fs_info->stripe_hash_table; in remove_rbio_from_cache()
355 spin_lock_irqsave(&table->cache_lock, flags); in remove_rbio_from_cache()
357 spin_unlock_irqrestore(&table->cache_lock, flags); in remove_rbio_from_cache()
369 table = info->stripe_hash_table; in btrfs_clear_rbio_cache()
371 spin_lock_irqsave(&table->cache_lock, flags); in btrfs_clear_rbio_cache()
372 while (!list_empty(&table->stripe_cache)) { in btrfs_clear_rbio_cache()
373 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
378 spin_unlock_irqrestore(&table->cache_lock, flags); in btrfs_clear_rbio_cache()
387 if (!info->stripe_hash_table) in btrfs_free_stripe_hash_table()
390 kvfree(info->stripe_hash_table); in btrfs_free_stripe_hash_table()
391 info->stripe_hash_table = NULL; in btrfs_free_stripe_hash_table()
410 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
413 table = rbio->bioc->fs_info->stripe_hash_table; in cache_rbio()
415 spin_lock_irqsave(&table->cache_lock, flags); in cache_rbio()
416 spin_lock(&rbio->bio_list_lock); in cache_rbio()
419 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
420 refcount_inc(&rbio->refs); in cache_rbio()
422 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
423 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
425 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
426 table->cache_size += 1; in cache_rbio()
429 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
431 if (table->cache_size > RBIO_CACHE_SIZE) { in cache_rbio()
434 found = list_entry(table->stripe_cache.prev, in cache_rbio()
442 spin_unlock_irqrestore(&table->cache_lock, flags); in cache_rbio()
460 src_cnt -= xor_src_cnt; in run_xor()
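run_xor() feeds batches of source buffers to the kernel's xor_blocks() (at most MAX_XOR_BLOCKS per call, hence the loop the line above belongs to). The result is plain RAID5 parity: the byte-wise XOR of the data blocks. A simplified userspace equivalent, with illustrative names and no batching:

#include <stddef.h>
#include <stdint.h>

/* dest becomes the XOR of all src_cnt source buffers (each len bytes). */
static void xor_parity(uint8_t *dest, uint8_t * const *srcs,
                       int src_cnt, size_t len)
{
        for (size_t i = 0; i < len; i++) {
                uint8_t v = 0;

                for (int s = 0; s < src_cnt; s++)
                        v ^= srcs[s][i];
                dest[i] = v;
        }
}

The kernel path seen later in finish_rmw() gets the same result by seeding the parity buffer with the first data block (memcpy) and XOR-ing the remaining blocks into it.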
472 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
475 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
476 if (size != rbio->nr_data * BTRFS_STRIPE_LEN) in rbio_is_full()
478 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_is_full()
479 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
497 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) || in rbio_can_merge()
498 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) in rbio_can_merge()
508 if (test_bit(RBIO_CACHE_BIT, &last->flags) || in rbio_can_merge()
509 test_bit(RBIO_CACHE_BIT, &cur->flags)) in rbio_can_merge()
512 if (last->bioc->raid_map[0] != cur->bioc->raid_map[0]) in rbio_can_merge()
516 if (last->operation != cur->operation) in rbio_can_merge()
520 * check and repair the parity and write the new results. in rbio_can_merge()
526 if (last->operation == BTRFS_RBIO_PARITY_SCRUB) in rbio_can_merge()
529 if (last->operation == BTRFS_RBIO_REBUILD_MISSING) in rbio_can_merge()
532 if (last->operation == BTRFS_RBIO_READ_REBUILD) { in rbio_can_merge()
533 int fa = last->faila; in rbio_can_merge()
534 int fb = last->failb; in rbio_can_merge()
535 int cur_fa = cur->faila; in rbio_can_merge()
536 int cur_fb = cur->failb; in rbio_can_merge()
538 if (last->faila >= last->failb) { in rbio_can_merge()
539 fa = last->failb; in rbio_can_merge()
540 fb = last->faila; in rbio_can_merge()
543 if (cur->faila >= cur->failb) { in rbio_can_merge()
544 cur_fa = cur->failb; in rbio_can_merge()
545 cur_fb = cur->faila; in rbio_can_merge()
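The swaps above put each rbio's (faila, failb) pair into ascending order; in the kernel the function then only allows the merge when both ordered pairs are identical, i.e. both rebuilds are missing exactly the same stripes. A tiny illustrative helper (not kernel code) for that comparison:

/* Return 1 if two rbios report the same set of failed stripes. */
static int same_failure_pattern(int a_faila, int a_failb,
                                int b_faila, int b_failb)
{
        int tmp;

        if (a_faila >= a_failb) {
                tmp = a_faila; a_faila = a_failb; a_failb = tmp;
        }
        if (b_faila >= b_failb) {
                tmp = b_faila; b_faila = b_failb; b_failb = tmp;
        }
        return a_faila == b_faila && a_failb == b_failb;
}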
558 ASSERT(stripe_nr < rbio->real_stripes); in rbio_stripe_sector_index()
559 ASSERT(sector_nr < rbio->stripe_nsectors); in rbio_stripe_sector_index()
561 return stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_stripe_sector_index()
564 /* Return a sector from rbio->stripe_sectors, not from the bio list */
569 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr, in rbio_stripe_sector()
577 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr); in rbio_pstripe_sector()
584 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_sector()
586 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr); in rbio_qstripe_sector()
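The helpers above flatten (stripe_nr, sector_nr) into stripe_nr * stripe_nsectors + sector_nr, with P always stored as stripe nr_data and Q as stripe nr_data + 1. A worked example with illustrative numbers, RAID6 over 4 devices (nr_data = 2, real_stripes = 4) and stripe_nsectors = 16, looking at sector 3 of each stripe:

        data0: 0 * 16 + 3 =  3
        data1: 1 * 16 + 3 = 19
        P:     2 * 16 + 3 = 35    (stripe nr_data)
        Q:     3 * 16 + 3 = 51    (stripe nr_data + 1)

On RAID5, nr_data + 1 == real_stripes, so rbio_qstripe_sector() returns NULL.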
621 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
623 spin_lock_irqsave(&h->lock, flags); in lock_stripe_add()
624 list_for_each_entry(cur, &h->hash_list, hash_list) { in lock_stripe_add()
625 if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0]) in lock_stripe_add()
628 spin_lock(&cur->bio_list_lock); in lock_stripe_add()
631 if (bio_list_empty(&cur->bio_list) && in lock_stripe_add()
632 list_empty(&cur->plug_list) && in lock_stripe_add()
633 test_bit(RBIO_CACHE_BIT, &cur->flags) && in lock_stripe_add()
634 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) { in lock_stripe_add()
635 list_del_init(&cur->hash_list); in lock_stripe_add()
636 refcount_dec(&cur->refs); in lock_stripe_add()
640 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
648 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
660 list_for_each_entry(pending, &cur->plug_list, plug_list) { in lock_stripe_add()
663 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
674 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
675 spin_unlock(&cur->bio_list_lock); in lock_stripe_add()
680 refcount_inc(&rbio->refs); in lock_stripe_add()
681 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
683 spin_unlock_irqrestore(&h->lock, flags); in lock_stripe_add()
692 * called as rmw or parity rebuild is completed. If the plug list has more
703 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
705 if (list_empty(&rbio->plug_list)) in unlock_stripe()
708 spin_lock_irqsave(&h->lock, flags); in unlock_stripe()
709 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
711 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
717 if (list_empty(&rbio->plug_list) && in unlock_stripe()
718 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
720 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
721 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
725 list_del_init(&rbio->hash_list); in unlock_stripe()
726 refcount_dec(&rbio->refs); in unlock_stripe()
729 * we use the plug list to hold all the rbios in unlock_stripe()
733 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
735 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
740 list_del_init(&rbio->plug_list); in unlock_stripe()
742 list_add(&next->hash_list, &h->hash_list); in unlock_stripe()
743 refcount_inc(&next->refs); in unlock_stripe()
744 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
745 spin_unlock_irqrestore(&h->lock, flags); in unlock_stripe()
747 if (next->operation == BTRFS_RBIO_READ_REBUILD) in unlock_stripe()
749 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) { in unlock_stripe()
752 } else if (next->operation == BTRFS_RBIO_WRITE) { in unlock_stripe()
755 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { in unlock_stripe()
764 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
765 spin_unlock_irqrestore(&h->lock, flags); in unlock_stripe()
776 if (!refcount_dec_and_test(&rbio->refs)) in __free_raid_bio()
779 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
780 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
781 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
783 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
784 if (rbio->stripe_pages[i]) { in __free_raid_bio()
785 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
786 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
790 btrfs_put_bioc(rbio->bioc); in __free_raid_bio()
799 next = cur->bi_next; in rbio_endio_bio_list()
800 cur->bi_next = NULL; in rbio_endio_bio_list()
801 cur->bi_status = err; in rbio_endio_bio_list()
813 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
821 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors); in rbio_orig_end_io()
824 * At this moment, rbio->bio_list is empty, however since rbio does not in rbio_orig_end_io()
826 * hash list, rbio may be merged with others so that rbio->bio_list in rbio_orig_end_io()
827 * becomes non-empty. in rbio_orig_end_io()
828 * Once unlock_stripe() is done, rbio->bio_list will not be updated any in rbio_orig_end_io()
832 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
846 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io()
847 blk_status_t err = bio->bi_status; in raid_write_end_io()
855 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
861 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? in raid_write_end_io()
862 0 : rbio->bioc->max_errors; in raid_write_end_io()
863 if (atomic_read(&rbio->error) > max_errors) in raid_write_end_io()
876 * @bio_list_only: Whether to use sectors inside the bio list only.
879 * as possible, and only use stripe_sectors as fallback.
888 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes); in sector_in_rbio()
889 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors); in sector_in_rbio()
891 index = stripe_nr * rbio->stripe_nsectors + sector_nr; in sector_in_rbio()
892 ASSERT(index >= 0 && index < rbio->nr_sectors); in sector_in_rbio()
894 spin_lock_irq(&rbio->bio_list_lock); in sector_in_rbio()
895 sector = &rbio->bio_sectors[index]; in sector_in_rbio()
896 if (sector->page || bio_list_only) { in sector_in_rbio()
898 if (!sector->page) in sector_in_rbio()
900 spin_unlock_irq(&rbio->bio_list_lock); in sector_in_rbio()
903 spin_unlock_irq(&rbio->bio_list_lock); in sector_in_rbio()
905 return &rbio->stripe_sectors[index]; in sector_in_rbio()
910 * this does not allocate any pages for rbio->pages.
915 const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs; in alloc_rbio()
919 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in alloc_rbio()
925 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize)); in alloc_rbio()
933 sizeof(*rbio->stripe_pages) * num_pages + in alloc_rbio()
934 sizeof(*rbio->bio_sectors) * num_sectors + in alloc_rbio()
935 sizeof(*rbio->stripe_sectors) * num_sectors + in alloc_rbio()
936 sizeof(*rbio->finish_pointers) * real_stripes, in alloc_rbio()
939 return ERR_PTR(-ENOMEM); in alloc_rbio()
941 bio_list_init(&rbio->bio_list); in alloc_rbio()
942 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
943 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
944 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
945 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
947 rbio->bioc = bioc; in alloc_rbio()
948 rbio->nr_pages = num_pages; in alloc_rbio()
949 rbio->nr_sectors = num_sectors; in alloc_rbio()
950 rbio->real_stripes = real_stripes; in alloc_rbio()
951 rbio->stripe_npages = stripe_npages; in alloc_rbio()
952 rbio->stripe_nsectors = stripe_nsectors; in alloc_rbio()
953 rbio->faila = -1; in alloc_rbio()
954 rbio->failb = -1; in alloc_rbio()
955 refcount_set(&rbio->refs, 1); in alloc_rbio()
956 atomic_set(&rbio->error, 0); in alloc_rbio()
957 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
968 CONSUME_ALLOC(rbio->stripe_pages, num_pages); in alloc_rbio()
969 CONSUME_ALLOC(rbio->bio_sectors, num_sectors); in alloc_rbio()
970 CONSUME_ALLOC(rbio->stripe_sectors, num_sectors); in alloc_rbio()
971 CONSUME_ALLOC(rbio->finish_pointers, real_stripes); in alloc_rbio()
974 ASSERT(btrfs_nr_parity_stripes(bioc->map_type)); in alloc_rbio()
975 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); in alloc_rbio()
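alloc_rbio() makes one allocation that holds the struct plus all of its trailing arrays, then the CONSUME_ALLOC() macro walks a rolling pointer through the extra memory to set up stripe_pages, bio_sectors, stripe_sectors and finish_pointers. A userspace sketch of the same carving, using a deliberately simplified stand-in struct (not the real btrfs_raid_bio):

#include <stdlib.h>

struct page;                            /* opaque for the example */

struct ex_rbio {
        struct page **stripe_pages;     /* num_pages entries */
        void **finish_pointers;         /* real_stripes entries */
        int num_pages;
        int real_stripes;
};

static struct ex_rbio *ex_alloc_rbio(int num_pages, int real_stripes)
{
        struct ex_rbio *rbio;
        unsigned char *p;

        rbio = calloc(1, sizeof(*rbio) +
                         sizeof(*rbio->stripe_pages) * num_pages +
                         sizeof(*rbio->finish_pointers) * real_stripes);
        if (!rbio)
                return NULL;

        /* Carve the trailing memory into the individual arrays. */
        p = (unsigned char *)(rbio + 1);
        rbio->stripe_pages = (struct page **)p;
        p += sizeof(*rbio->stripe_pages) * num_pages;
        rbio->finish_pointers = (void **)p;

        rbio->num_pages = num_pages;
        rbio->real_stripes = real_stripes;
        return rbio;
}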
980 /* allocate pages for all the stripes in the bio, including parity */
985 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages); in alloc_rbio_pages()
996 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_parity_pages()
999 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, in alloc_rbio_parity_pages()
1000 rbio->stripe_pages + data_pages); in alloc_rbio_parity_pages()
1021 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_sector()
1022 struct bio *last = bio_list->tail; in rbio_add_io_sector()
1030 * thus it can be larger than rbio->real_stripes. in rbio_add_io_sector()
1031 * So here we check against bioc->num_stripes, not rbio->real_stripes. in rbio_add_io_sector()
1033 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes); in rbio_add_io_sector()
1034 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors); in rbio_add_io_sector()
1035 ASSERT(sector->page); in rbio_add_io_sector()
1037 stripe = &rbio->bioc->stripes[stripe_nr]; in rbio_add_io_sector()
1038 disk_start = stripe->physical + sector_nr * sectorsize; in rbio_add_io_sector()
1041 if (!stripe->dev->bdev) in rbio_add_io_sector()
1046 u64 last_end = last->bi_iter.bi_sector << 9; in rbio_add_io_sector()
1047 last_end += last->bi_iter.bi_size; in rbio_add_io_sector()
1053 if (last_end == disk_start && !last->bi_status && in rbio_add_io_sector()
1054 last->bi_bdev == stripe->dev->bdev) { in rbio_add_io_sector()
1055 ret = bio_add_page(last, sector->page, sectorsize, in rbio_add_io_sector()
1056 sector->pgoff); in rbio_add_io_sector()
1063 bio = bio_alloc(stripe->dev->bdev, in rbio_add_io_sector()
1066 bio->bi_iter.bi_sector = disk_start >> 9; in rbio_add_io_sector()
1067 bio->bi_private = rbio; in rbio_add_io_sector()
1069 bio_add_page(bio, sector->page, sectorsize, sector->pgoff); in rbio_add_io_sector()
1078 * trigger parity reconstruction. The rmw will be finished
1083 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1084 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1093 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_one_bio()
1096 u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in index_one_bio()
1097 rbio->bioc->raid_map[0]; in index_one_bio()
1105 struct sector_ptr *sector = &rbio->bio_sectors[index]; in index_one_bio()
1107 sector->page = bvec.bv_page; in index_one_bio()
1108 sector->pgoff = bvec.bv_offset + bvec_offset; in index_one_bio()
1109 ASSERT(sector->pgoff < PAGE_SIZE); in index_one_bio()
1126 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1127 bio_list_for_each(bio, &rbio->bio_list) in index_rbio_pages()
1130 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1136 const struct btrfs_io_context *bioc = rbio->bioc; in bio_get_trace_info()
1141 /* We rely on bio->bi_bdev to find the stripe number. */ in bio_get_trace_info()
1142 if (!bio->bi_bdev) in bio_get_trace_info()
1145 for (i = 0; i < bioc->num_stripes; i++) { in bio_get_trace_info()
1146 if (bio->bi_bdev != bioc->stripes[i].dev->bdev) in bio_get_trace_info()
1148 trace_info->stripe_nr = i; in bio_get_trace_info()
1149 trace_info->devid = bioc->stripes[i].dev->devid; in bio_get_trace_info()
1150 trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - in bio_get_trace_info()
1151 bioc->stripes[i].physical; in bio_get_trace_info()
1156 trace_info->devid = -1; in bio_get_trace_info()
1157 trace_info->offset = -1; in bio_get_trace_info()
1158 trace_info->stripe_nr = -1; in bio_get_trace_info()
1166 * This will calculate the parity and then send down any
1171 struct btrfs_io_context *bioc = rbio->bioc; in finish_rmw()
1172 const u32 sectorsize = bioc->fs_info->sectorsize; in finish_rmw()
1173 void **pointers = rbio->finish_pointers; in finish_rmw()
1174 int nr_data = rbio->nr_data; in finish_rmw()
1187 if (rbio->real_stripes - rbio->nr_data == 1) in finish_rmw()
1189 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_rmw()
1195 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); in finish_rmw()
1199 * recalculate the parity and write the new results. in finish_rmw()
1205 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1206 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1207 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1209 atomic_set(&rbio->error, 0); in finish_rmw()
1216 * the higher layers are unlikely to use this area of in finish_rmw()
1217 * the disk again soon. If they do use it again, in finish_rmw()
1224 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1226 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in finish_rmw()
1232 pointers[stripe] = kmap_local_page(sector->page) + in finish_rmw()
1233 sector->pgoff; in finish_rmw()
1236 /* Then add the parity stripe */ in finish_rmw()
1238 sector->uptodate = 1; in finish_rmw()
1239 pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff; in finish_rmw()
1247 sector->uptodate = 1; in finish_rmw()
1248 pointers[stripe++] = kmap_local_page(sector->page) + in finish_rmw()
1249 sector->pgoff; in finish_rmw()
1251 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_rmw()
1256 run_xor(pointers + 1, nr_data - 1, sectorsize); in finish_rmw()
1258 for (stripe = stripe - 1; stripe >= 0; stripe--) in finish_rmw()
1266 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in finish_rmw()
1270 stripe = total_sector_nr / rbio->stripe_nsectors; in finish_rmw()
1271 sectornr = total_sector_nr % rbio->stripe_nsectors; in finish_rmw()
1274 if (!test_bit(sectornr, &rbio->dbitmap)) in finish_rmw()
1277 if (stripe < rbio->nr_data) { in finish_rmw()
1291 if (likely(!bioc->num_tgtdevs)) in finish_rmw()
1294 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in finish_rmw()
1298 stripe = total_sector_nr / rbio->stripe_nsectors; in finish_rmw()
1299 sectornr = total_sector_nr % rbio->stripe_nsectors; in finish_rmw()
1301 if (!bioc->tgtdev_map[stripe]) { in finish_rmw()
1307 total_sector_nr += rbio->stripe_nsectors - 1; in finish_rmw()
1312 if (!test_bit(sectornr, &rbio->dbitmap)) in finish_rmw()
1315 if (stripe < rbio->nr_data) { in finish_rmw()
1324 rbio->bioc->tgtdev_map[stripe], in finish_rmw()
1331 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1332 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1335 bio->bi_end_io = raid_write_end_io; in finish_rmw()
1362 u64 physical = bio->bi_iter.bi_sector; in find_bio_stripe()
1368 for (i = 0; i < rbio->bioc->num_stripes; i++) { in find_bio_stripe()
1369 stripe = &rbio->bioc->stripes[i]; in find_bio_stripe()
1370 if (in_range(physical, stripe->physical, BTRFS_STRIPE_LEN) && in find_bio_stripe()
1371 stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) { in find_bio_stripe()
1375 return -1; in find_bio_stripe()
1386 u64 logical = bio->bi_iter.bi_sector << 9; in find_logical_bio_stripe()
1389 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1390 u64 stripe_start = rbio->bioc->raid_map[i]; in find_logical_bio_stripe()
1395 return -1; in find_logical_bio_stripe()
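find_logical_bio_stripe() maps a bio's logical start address onto a data stripe index by scanning raid_map[], which holds the logical start of each data stripe in the full stripe. A self-contained sketch, with EX_STRIPE_LEN standing in for BTRFS_STRIPE_LEN (64K in current kernels) and illustrative names:

#include <stdint.h>

#define EX_STRIPE_LEN (64u * 1024)

static int ex_find_logical_stripe(const uint64_t *raid_map, int nr_data,
                                  uint64_t logical)
{
        for (int i = 0; i < nr_data; i++) {
                if (logical >= raid_map[i] &&
                    logical < raid_map[i] + EX_STRIPE_LEN)
                        return i;
        }
        return -1;      /* not covered by this full stripe's data */
}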
1399 * returns -EIO if we had too many failures
1406 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1409 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1412 if (rbio->faila == -1) { in fail_rbio_index()
1414 rbio->faila = failed; in fail_rbio_index()
1415 atomic_inc(&rbio->error); in fail_rbio_index()
1416 } else if (rbio->failb == -1) { in fail_rbio_index()
1418 rbio->failb = failed; in fail_rbio_index()
1419 atomic_inc(&rbio->error); in fail_rbio_index()
1421 ret = -EIO; in fail_rbio_index()
1424 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1439 return -EIO; in fail_bio_stripe()
1454 for (i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector()
1455 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in find_stripe_sector()
1457 if (sector->page == page && sector->pgoff == pgoff) in find_stripe_sector()
1469 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in set_bio_pages_uptodate()
1479 for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len; in set_bio_pages_uptodate()
1481 sector = find_stripe_sector(rbio, bvec->bv_page, pgoff); in set_bio_pages_uptodate()
1484 sector->uptodate = 1; in set_bio_pages_uptodate()
1491 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_bio_end_io()
1493 if (bio->bi_status) in raid56_bio_end_io()
1500 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid56_bio_end_io()
1501 queue_work(rbio->bioc->fs_info->endio_raid56_workers, in raid56_bio_end_io()
1502 &rbio->end_io_work); in raid56_bio_end_io()
1508 * parity of the stripe.
1511 * may trigger parity reconstruction if we had any errors along the way
1518 if (atomic_read(&rbio->error) > rbio->bioc->max_errors) { in raid56_rmw_end_io_work()
1525 * are any failed stripes we'll reconstruct from parity first. in raid56_rmw_end_io_work()
1538 const int nr_data_sectors = rbio->stripe_nsectors * rbio->nr_data; in raid56_rmw_stripe()
1551 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1556 int stripe = total_sector_nr / rbio->stripe_nsectors; in raid56_rmw_stripe()
1557 int sectornr = total_sector_nr % rbio->stripe_nsectors; in raid56_rmw_stripe()
1571 * use it. in raid56_rmw_stripe()
1573 if (sector->uptodate) in raid56_rmw_stripe()
1597 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1598 INIT_WORK(&rbio->end_io_work, raid56_rmw_end_io_work); in raid56_rmw_stripe()
1600 bio->bi_end_io = raid56_bio_end_io; in raid56_rmw_stripe()
1619 return -EIO; in raid56_rmw_stripe()
1628 * enough pages to hold the parity, and sending it all down quickly.
1647 * rbio before calculating new parity
1661 * recalculate parity, enough new bios come in to create
1674 * We use plugging callbacks to collect full stripes.
1697 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1698 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1701 return -1; in plug_cmp()
1717 list_sort(NULL, &plug->rbio_list, plug_cmp); in run_plug()
1718 while (!list_empty(&plug->rbio_list)) { in run_plug()
1719 cur = list_entry(plug->rbio_list.next, in run_plug()
1721 list_del_init(&cur->plug_list); in run_plug()
1765 INIT_WORK(&plug->work, unplug_work); in btrfs_raid_unplug()
1766 queue_work(plug->info->rmw_workers, &plug->work); in btrfs_raid_unplug()
1772 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1775 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in rbio_add_bio()
1776 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; in rbio_add_bio()
1777 const u64 full_stripe_start = rbio->bioc->raid_map[0]; in rbio_add_bio()
1778 const u32 orig_len = orig_bio->bi_iter.bi_size; in rbio_add_bio()
1779 const u32 sectorsize = fs_info->sectorsize; in rbio_add_bio()
1784 rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_add_bio()
1786 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1787 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1792 int bit = ((u32)(cur_logical - full_stripe_start) >> in rbio_add_bio()
1793 fs_info->sectorsize_bits) % rbio->stripe_nsectors; in rbio_add_bio()
1795 set_bit(bit, &rbio->dbitmap); in rbio_add_bio()
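rbio_add_bio() records, per horizontal sector position, whether the incoming bio carries new data, so later stages only write back rows that actually changed. A plain userspace model of that dbitmap update; the helper name and the bare unsigned long bitmap are illustrative:

#include <stdint.h>

static unsigned long mark_dirty_sectors(uint64_t full_stripe_start,
                                        uint64_t logical, uint32_t len,
                                        uint32_t sectorsize,
                                        uint32_t stripe_nsectors)
{
        unsigned long dbitmap = 0;

        for (uint64_t cur = logical; cur < logical + len; cur += sectorsize) {
                uint32_t bit = (uint32_t)((cur - full_stripe_start) / sectorsize)
                               % stripe_nsectors;

                dbitmap |= 1UL << bit;
        }
        return dbitmap;
}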
1804 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_write()
1815 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1834 if (!plug->info) { in raid56_parity_write()
1835 plug->info = fs_info; in raid56_parity_write()
1836 INIT_LIST_HEAD(&plug->rbio_list); in raid56_parity_write()
1838 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1850 bio->bi_status = errno_to_blk_status(ret); in raid56_parity_write()
1855 * all parity reconstruction happens here. We've read in everything
1861 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in __raid_recover_end_io()
1865 int faila = -1, failb = -1; in __raid_recover_end_io()
1873 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1883 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1889 faila = rbio->faila; in __raid_recover_end_io()
1890 failb = rbio->failb; in __raid_recover_end_io()
1892 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1893 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1894 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1895 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1896 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1901 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in __raid_recover_end_io()
1905 * Now we just use bitmap to mark the horizontal stripes in in __raid_recover_end_io()
1906 * which we have data when doing parity scrub. in __raid_recover_end_io()
1908 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1909 !test_bit(sectornr, &rbio->dbitmap)) in __raid_recover_end_io()
1918 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1920 * If we're rebuilding a read, we have to use in __raid_recover_end_io()
1923 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1924 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1930 ASSERT(sector->page); in __raid_recover_end_io()
1931 pointers[stripe] = kmap_local_page(sector->page) + in __raid_recover_end_io()
1932 sector->pgoff; in __raid_recover_end_io()
1937 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1938 /* Single failure, rebuild from parity raid5 style */ in __raid_recover_end_io()
1940 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1966 if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1967 if (rbio->bioc->raid_map[faila] == in __raid_recover_end_io()
1979 if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1980 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1983 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1991 BUG_ON(failb != -1); in __raid_recover_end_io()
1993 /* Copy parity block into failed block to start with */ in __raid_recover_end_io()
1994 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); in __raid_recover_end_io()
1998 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
2000 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
2003 run_xor(pointers, rbio->nr_data - 1, sectorsize); in __raid_recover_end_io()
2011 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
2012 for (i = 0; i < rbio->stripe_nsectors; i++) { in __raid_recover_end_io()
2013 if (faila != -1) { in __raid_recover_end_io()
2015 sector->uptodate = 1; in __raid_recover_end_io()
2017 if (failb != -1) { in __raid_recover_end_io()
2019 sector->uptodate = 1; in __raid_recover_end_io()
2023 for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--) in __raid_recover_end_io()
2039 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
2040 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
2042 * - In case of two failures, where rbio->failb != -1: in __raid_recover_end_io()
2047 * on-disk content any more, otherwise, a later write/recover in __raid_recover_end_io()
2051 * - In case of single failure, where rbio->failb == -1: in __raid_recover_end_io()
2056 if (err == BLK_STS_OK && rbio->failb < 0) in __raid_recover_end_io()
2059 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
2063 rbio->faila = -1; in __raid_recover_end_io()
2064 rbio->failb = -1; in __raid_recover_end_io()
2066 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
2068 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
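For the single-failure path shown above (copy the parity block into the failed slot, rotate the pointer array, XOR in the rest), the net effect is that the missing data block is the XOR of P and every surviving data block. A direct, illustrative userspace version (not kernel code):

#include <stddef.h>
#include <stdint.h>

static void rebuild_single_failure(uint8_t **data, int nr_data, int failed,
                                   const uint8_t *parity, size_t len)
{
        for (size_t i = 0; i < len; i++) {
                uint8_t v = parity[i];

                for (int d = 0; d < nr_data; d++) {
                        if (d == failed)
                                continue;
                        v ^= data[d][i];
                }
                data[failed][i] = v;
        }
}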
2079 * parity.
2086 if (atomic_read(&rbio->error) > rbio->bioc->max_errors) in raid_recover_end_io_work()
2094 * the parity. endio handlers trigger final reconstruction
2098 * parity construction required to finish a rmw cycle.
2114 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2122 * So here we always re-read everything in recovery path. in __raid56_parity_recover()
2124 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in __raid56_parity_recover()
2126 int stripe = total_sector_nr / rbio->stripe_nsectors; in __raid56_parity_recover()
2127 int sectornr = total_sector_nr % rbio->stripe_nsectors; in __raid56_parity_recover()
2130 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2131 atomic_inc(&rbio->error); in __raid56_parity_recover()
2134 total_sector_nr += rbio->stripe_nsectors - 1; in __raid56_parity_recover()
2151 if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) { in __raid56_parity_recover()
2163 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2164 INIT_WORK(&rbio->end_io_work, raid_recover_end_io_work); in __raid56_parity_recover()
2166 bio->bi_end_io = raid56_bio_end_io; in __raid56_parity_recover()
2180 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2181 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2187 return -EIO; in __raid56_parity_recover()
2199 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_recover()
2204 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_recover()
2208 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2211 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2212 if (rbio->faila == -1) { in raid56_parity_recover()
2215 __func__, bio->bi_iter.bi_sector << 9, in raid56_parity_recover()
2216 (u64)bio->bi_iter.bi_size, bioc->map_type); in raid56_parity_recover()
2218 bio->bi_status = BLK_STS_IOERR; in raid56_parity_recover()
2233 rbio->failb = rbio->real_stripes - (mirror_num - 1); in raid56_parity_recover()
2234 ASSERT(rbio->failb > 0); in raid56_parity_recover()
2235 if (rbio->failb <= rbio->faila) in raid56_parity_recover()
2236 rbio->failb--; in raid56_parity_recover()
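The failb computation above turns mirror_num into an extra stripe to treat as failed on retries. A worked example with illustrative numbers: on RAID6 with real_stripes = 4 (data0, data1, P at index 2, Q at index 3), mirror_num = 3 gives failb = 4 - (3 - 1) = 2, i.e. fail the P stripe and rebuild from Q; mirror_num = 4 gives failb = 1, a data stripe, and if that lands on or below the already-failed faila, the failb-- step shifts it down so the retry fails a different stripe.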
2270 * The following code is used to scrub/replace the parity stripe
2284 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_parity_alloc_scrub_rbio()
2291 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2296 ASSERT(!bio->bi_iter.bi_size); in raid56_parity_alloc_scrub_rbio()
2297 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2301 * to the end position, so this search can start from the first parity in raid56_parity_alloc_scrub_rbio()
2304 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2305 if (bioc->stripes[i].dev == scrub_dev) { in raid56_parity_alloc_scrub_rbio()
2306 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2310 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2312 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2316 /* Used for both parity scrub and missing. */
2320 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in raid56_add_scrub_pages()
2324 ASSERT(logical >= rbio->bioc->raid_map[0]); in raid56_add_scrub_pages()
2325 ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] + in raid56_add_scrub_pages()
2326 BTRFS_STRIPE_LEN * rbio->nr_data); in raid56_add_scrub_pages()
2327 stripe_offset = (int)(logical - rbio->bioc->raid_map[0]); in raid56_add_scrub_pages()
2329 rbio->bio_sectors[index].page = page; in raid56_add_scrub_pages()
2330 rbio->bio_sectors[index].pgoff = pgoff; in raid56_add_scrub_pages()
2334 * We only scrub the parity for horizontal stripes where we have correct data, in alloc_rbio_essential_pages()
2339 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in alloc_rbio_essential_pages()
2342 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in alloc_rbio_essential_pages()
2345 int sectornr = total_sector_nr % rbio->stripe_nsectors; in alloc_rbio_essential_pages()
2348 if (!test_bit(sectornr, &rbio->dbitmap)) in alloc_rbio_essential_pages()
2350 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2354 return -ENOMEM; in alloc_rbio_essential_pages()
2355 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2364 struct btrfs_io_context *bioc = rbio->bioc; in finish_parity_scrub()
2365 const u32 sectorsize = bioc->fs_info->sectorsize; in finish_parity_scrub()
2366 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2367 unsigned long *pbitmap = &rbio->finish_pbitmap; in finish_parity_scrub()
2368 int nr_data = rbio->nr_data; in finish_parity_scrub()
2381 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2383 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2388 if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2390 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); in finish_parity_scrub()
2395 * use this area of the disk again soon, so don't cache in finish_parity_scrub()
2398 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2419 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page); in finish_parity_scrub()
2422 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2424 /* Map the parity stripe just once */ in finish_parity_scrub()
2427 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2429 void *parity; in finish_parity_scrub() local
2434 pointers[stripe] = kmap_local_page(sector->page) + in finish_parity_scrub()
2435 sector->pgoff; in finish_parity_scrub()
2440 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_parity_scrub()
2445 run_xor(pointers + 1, nr_data - 1, sectorsize); in finish_parity_scrub()
2448 /* Check scrubbing parity and repair it */ in finish_parity_scrub()
2449 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2450 parity = kmap_local_page(sector->page) + sector->pgoff; in finish_parity_scrub()
2451 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0) in finish_parity_scrub()
2452 memcpy(parity, pointers[rbio->scrubp], sectorsize); in finish_parity_scrub()
2454 /* Parity is correct, no need to write it back */ in finish_parity_scrub()
2455 bitmap_clear(&rbio->dbitmap, sectornr, 1); in finish_parity_scrub()
2456 kunmap_local(parity); in finish_parity_scrub()
2458 for (stripe = nr_data - 1; stripe >= 0; stripe--) in finish_parity_scrub()
2466 kunmap_local(pointers[rbio->real_stripes - 1]); in finish_parity_scrub()
2477 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2480 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2481 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp, in finish_parity_scrub()
2490 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2493 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2495 bioc->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2504 /* All parity sectors are correct */ in finish_parity_scrub()
2509 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2512 bio->bi_end_io = raid_write_end_io; in finish_parity_scrub()
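The per-sector scrub decision in finish_parity_scrub() (the memcmp/memcpy shown further up) boils down to: recompute the parity from the data, compare it with what is on disk, and only leave the dbitmap bit set, i.e. queue a writeback, when they differ. A small illustrative helper (not kernel code) with the same semantics:

#include <stdint.h>
#include <string.h>

/* Return 1 when the on-disk parity was wrong and must be written back. */
static int scrub_parity_sector(uint8_t *ondisk_parity,
                               const uint8_t *computed_parity, size_t len)
{
        if (memcmp(ondisk_parity, computed_parity, len) == 0)
                return 0;               /* parity is correct, skip writeback */

        memcpy(ondisk_parity, computed_parity, len);
        return 1;                       /* repaired in memory, must write back */
}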
2533 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2539 * While we're doing the parity check and repair, we could have errors
2541 * not able to read the page it'll trigger parity reconstruction. The
2542 * parity scrub will be finished after we've reconstructed the failed
2547 if (atomic_read(&rbio->error) > rbio->bioc->max_errors) in validate_rbio_for_parity_scrub()
2550 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2551 int dfail = 0, failp = -1; in validate_rbio_for_parity_scrub()
2553 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2555 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2556 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2558 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2560 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2561 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2564 * Because we can not use a scrubbing parity to repair in validate_rbio_for_parity_scrub()
2568 if (dfail > rbio->bioc->max_errors - 1) in validate_rbio_for_parity_scrub()
2572 * If all the data is good and only the parity is bad, just in validate_rbio_for_parity_scrub()
2573 * repair the parity. in validate_rbio_for_parity_scrub()
2582 * corrupted parity on RAID6. If the corrupted parity in validate_rbio_for_parity_scrub()
2583 * is the scrubbing parity, we can luckily use the other one to repair in validate_rbio_for_parity_scrub()
2586 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2601 * stripe bios we've read from the disk so we can recalculate the parity of the
2605 * may trigger parity reconstruction if we had any errors along the way
2614 * are any failed stripes we'll reconstruct from parity first in raid56_parity_scrub_end_io_work()
2633 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2635 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in raid56_parity_scrub_stripe()
2637 int sectornr = total_sector_nr % rbio->stripe_nsectors; in raid56_parity_scrub_stripe()
2638 int stripe = total_sector_nr / rbio->stripe_nsectors; in raid56_parity_scrub_stripe()
2642 if (!test_bit(sectornr, &rbio->dbitmap)) in raid56_parity_scrub_stripe()
2657 * use it. in raid56_parity_scrub_stripe()
2659 if (sector->uptodate) in raid56_parity_scrub_stripe()
2683 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2684 INIT_WORK(&rbio->end_io_work, raid56_parity_scrub_end_io_work); in raid56_parity_scrub_stripe()
2686 bio->bi_end_io = raid56_bio_end_io; in raid56_parity_scrub_stripe()
2730 struct btrfs_fs_info *fs_info = bioc->fs_info; in raid56_alloc_missing_rbio()
2737 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2738 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2743 ASSERT(!bio->bi_iter.bi_size); in raid56_alloc_missing_rbio()
2745 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2746 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2749 bioc->raid_map[0]); in raid56_alloc_missing_rbio()