Lines matching refs: rbio (identifier cross-reference over the btrfs RAID5/6 code, fs/btrfs/raid56.c)

177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
183 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
184 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) in start_async_work() argument
193 btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); in start_async_work()
194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); in start_async_work()
252 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
259 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
263 for (i = 0; i < rbio->nr_pages; i++) { in cache_rbio_pages()
264 if (!rbio->bio_pages[i]) in cache_rbio_pages()
267 s = kmap(rbio->bio_pages[i]); in cache_rbio_pages()
268 d = kmap(rbio->stripe_pages[i]); in cache_rbio_pages()
272 kunmap(rbio->bio_pages[i]); in cache_rbio_pages()
273 kunmap(rbio->stripe_pages[i]); in cache_rbio_pages()
274 SetPageUptodate(rbio->stripe_pages[i]); in cache_rbio_pages()
276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
282 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
284 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
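
The rbio_bucket() lines above show that the stripe hash bucket is derived from the logical start of the full stripe, bbio->raid_map[0]. Below is a minimal userspace sketch of that idea; the shift amount, the hash constant and the names BUCKET_BITS/stripe_bucket are illustrative stand-ins for the kernel's own hash helper and table size, not the exact implementation:

#include <stdint.h>
#include <stdio.h>

#define BUCKET_BITS 8                     /* illustrative table size, not the kernel constant */
#define NR_BUCKETS  (1U << BUCKET_BITS)

/* Stand-in for the kernel's 64-bit hash: a simple multiplicative hash. */
static unsigned int hash64_to_bucket(uint64_t val)
{
	return (unsigned int)((val * 0x9E3779B97F4A7C15ULL) >> (64 - BUCKET_BITS));
}

/*
 * Like rbio_bucket(): hash the logical start address of the full stripe.
 * The low bits of a byte address inside a large stripe are mostly zero, so
 * they are shifted off (assumed here) before hashing to spread rbios
 * across buckets.
 */
static unsigned int stripe_bucket(uint64_t raid_map0)
{
	return hash64_to_bucket(raid_map0 >> 16);
}

int main(void)
{
	uint64_t logical = 1ULL << 30;	/* some full-stripe start address */

	printf("bucket = %u of %u\n", stripe_bucket(logical), NR_BUCKETS);
	return 0;
}
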
345 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
347 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
355 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
358 table = rbio->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
370 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
372 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
373 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
386 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
387 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
388 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
389 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
390 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
395 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
399 __free_raid_bio(rbio); in __remove_rbio_from_cache()
405 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
413 table = rbio->fs_info->stripe_hash_table; in remove_rbio_from_cache()
416 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
427 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
433 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
436 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
465 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
470 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
473 table = rbio->fs_info->stripe_hash_table; in cache_rbio()
476 spin_lock(&rbio->bio_list_lock); in cache_rbio()
479 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
480 refcount_inc(&rbio->refs); in cache_rbio()
482 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
483 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
485 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
489 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
498 if (found != rbio) in cache_rbio()
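
The cache_rbio()/__remove_rbio_from_cache() lines above show the stripe cache as a reference-counted LRU list: inserting takes a reference the first time (test_and_set_bit + refcount_inc), re-caching just moves the entry to the front, and removal drops the cache's reference. A small self-contained sketch of that lifecycle, with a hand-rolled list and plain int refcount standing in for the kernel list and refcount_t:

#include <stdio.h>

struct entry {
	struct entry *prev, *next;	/* LRU links; list head = most recently cached */
	int refs;
	int cached;
	const char *name;
};

struct lru {
	struct entry head;		/* sentinel node */
};

static void lru_init(struct lru *l)
{
	l->head.prev = l->head.next = &l->head;
}

static void list_del_entry(struct entry *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->prev = e->next = e;
}

static void list_add_front(struct lru *l, struct entry *e)
{
	e->next = l->head.next;
	e->prev = &l->head;
	l->head.next->prev = e;
	l->head.next = e;
}

/* Like cache_rbio(): take a reference on first insert, keep the entry hot. */
static void cache_entry(struct lru *l, struct entry *e)
{
	if (!e->cached) {
		e->cached = 1;
		e->refs++;		/* the cache now holds a reference */
	} else {
		list_del_entry(e);	/* already cached: move to the front */
	}
	list_add_front(l, e);
}

/* Like __remove_rbio_from_cache(): drop the cache's reference. */
static void uncache_entry(struct entry *e)
{
	if (!e->cached)
		return;
	e->cached = 0;
	list_del_entry(e);
	e->refs--;
}

int main(void)
{
	struct lru cache;
	struct entry a = { .refs = 1, .name = "stripe A" };

	lru_init(&cache);
	cache_entry(&cache, &a);
	printf("%s cached, refs=%d\n", a.name, a.refs);
	uncache_entry(&a);
	printf("%s uncached, refs=%d\n", a.name, a.refs);
	return 0;
}
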
529 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
532 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
535 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
536 if (size != rbio->nr_data * rbio->stripe_len) in rbio_is_full()
538 BUG_ON(size > rbio->nr_data * rbio->stripe_len); in rbio_is_full()
539 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
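
rbio_is_full() above compares the bytes queued on the bio list against nr_data * stripe_len. A tiny sketch of that test (the 64K stripe length in main() is just an example value):

#include <stdint.h>
#include <stdio.h>

/*
 * Like rbio_is_full(): a write covers the full stripe only when the queued
 * bytes equal nr_data * stripe_len.  Full-stripe writes can skip the read
 * half of read-modify-write because every data byte is being replaced.
 */
static int stripe_write_is_full(uint64_t queued_bytes, int nr_data, uint64_t stripe_len)
{
	return queued_bytes == (uint64_t)nr_data * stripe_len;
}

int main(void)
{
	uint64_t stripe_len = 64 * 1024;	/* example per-device stripe length */

	printf("full:    %d\n", stripe_write_is_full(3 * stripe_len, 3, stripe_len));
	printf("partial: %d\n", stripe_write_is_full(1 * stripe_len, 3, stripe_len));
	return 0;
}
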
615 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page_index() argument
618 return stripe * rbio->stripe_npages + index; in rbio_stripe_page_index()
625 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page() argument
628 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; in rbio_stripe_page()
634 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_pstripe_page() argument
636 return rbio_stripe_page(rbio, rbio->nr_data, index); in rbio_pstripe_page()
643 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_qstripe_page() argument
645 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_page()
647 return rbio_stripe_page(rbio, rbio->nr_data + 1, index); in rbio_qstripe_page()
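
The page helpers above lay every stripe's pages out in one flat array: stripe * stripe_npages + index, with the data stripes first, then P at index nr_data, then Q at nr_data + 1 (RAID6 only, per the real_stripes check in rbio_qstripe_page()). A sketch of that arithmetic:

#include <stdio.h>

/* Like rbio_stripe_page_index(): flat array, stripe_npages pages per stripe. */
static int stripe_page_index(int stripe, int pagenr, int stripe_npages)
{
	return stripe * stripe_npages + pagenr;
}

int main(void)
{
	int nr_data = 3, stripe_npages = 16;
	int p_stripe = nr_data;		/* P parity follows the data stripes */
	int q_stripe = nr_data + 1;	/* Q parity (RAID6 only) follows P */

	printf("data[1], page 4  -> index %d\n", stripe_page_index(1, 4, stripe_npages));
	printf("P stripe, page 4 -> index %d\n", stripe_page_index(p_stripe, 4, stripe_npages));
	printf("Q stripe, page 4 -> index %d\n", stripe_page_index(q_stripe, 4, stripe_npages));
	return 0;
}
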
672 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
674 int bucket = rbio_bucket(rbio); in lock_stripe_add()
675 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; in lock_stripe_add()
685 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { in lock_stripe_add()
696 steal_rbio(cur, rbio); in lock_stripe_add()
704 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
705 merge_rbio(cur, rbio); in lock_stripe_add()
707 freeit = rbio; in lock_stripe_add()
723 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
724 merge_rbio(pending, rbio); in lock_stripe_add()
726 freeit = rbio; in lock_stripe_add()
736 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
743 refcount_inc(&rbio->refs); in lock_stripe_add()
744 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
758 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
765 bucket = rbio_bucket(rbio); in unlock_stripe()
766 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
768 if (list_empty(&rbio->plug_list)) in unlock_stripe()
769 cache_rbio(rbio); in unlock_stripe()
772 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
774 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
780 if (list_empty(&rbio->plug_list) && in unlock_stripe()
781 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
783 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
784 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
788 list_del_init(&rbio->hash_list); in unlock_stripe()
789 refcount_dec(&rbio->refs); in unlock_stripe()
796 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
798 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
803 list_del_init(&rbio->plug_list); in unlock_stripe()
807 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
813 steal_rbio(rbio, next); in unlock_stripe()
816 steal_rbio(rbio, next); in unlock_stripe()
819 steal_rbio(rbio, next); in unlock_stripe()
827 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
832 remove_rbio_from_cache(rbio); in unlock_stripe()
835 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
839 if (!refcount_dec_and_test(&rbio->refs)) in __free_raid_bio()
842 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
843 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
844 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
846 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
847 if (rbio->stripe_pages[i]) { in __free_raid_bio()
848 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
849 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
853 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
854 kfree(rbio); in __free_raid_bio()
874 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) in rbio_orig_end_io() argument
876 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
879 if (rbio->generic_bio_cnt) in rbio_orig_end_io()
880 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); in rbio_orig_end_io()
890 unlock_stripe(rbio); in rbio_orig_end_io()
891 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
892 __free_raid_bio(rbio); in rbio_orig_end_io()
905 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
910 fail_bio_stripe(rbio, bio); in raid_write_end_io()
914 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
920 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? in raid_write_end_io()
921 0 : rbio->bbio->max_errors; in raid_write_end_io()
922 if (atomic_read(&rbio->error) > max_errors) in raid_write_end_io()
925 rbio_orig_end_io(rbio, err); in raid_write_end_io()
944 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, in page_in_rbio() argument
950 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; in page_in_rbio()
952 spin_lock_irq(&rbio->bio_list_lock); in page_in_rbio()
953 p = rbio->bio_pages[chunk_page]; in page_in_rbio()
954 spin_unlock_irq(&rbio->bio_list_lock); in page_in_rbio()
959 return rbio->stripe_pages[chunk_page]; in page_in_rbio()
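
page_in_rbio() above computes a flat page index and then prefers the page that arrived with the caller's bios (bio_pages[]) over the allocated/cached stripe page. A minimal sketch of that preference, using plain pointers in place of struct page (pick_page and pages_per_stripe are illustrative names):

#include <stddef.h>
#include <stdio.h>

/*
 * Like page_in_rbio(): the bio page, when present, holds the newest data;
 * otherwise fall back to the stripe page read from or cached for the disk.
 */
static void *pick_page(void **bio_pages, void **stripe_pages,
		       int stripe, int pagenr, int pages_per_stripe)
{
	int idx = stripe * pages_per_stripe + pagenr;

	if (bio_pages[idx])
		return bio_pages[idx];
	return stripe_pages[idx];
}

int main(void)
{
	char new_data[16] = "new", old_data[16] = "old";
	void *bio_pages[4]    = { NULL, new_data, NULL, NULL };
	void *stripe_pages[4] = { old_data, old_data, old_data, old_data };

	printf("page 0: %s\n", (char *)pick_page(bio_pages, stripe_pages, 0, 0, 2));
	printf("page 1: %s\n", (char *)pick_page(bio_pages, stripe_pages, 0, 1, 2));
	return 0;
}
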
979 struct btrfs_raid_bio *rbio; in alloc_rbio() local
986 rbio = kzalloc(sizeof(*rbio) + in alloc_rbio()
987 sizeof(*rbio->stripe_pages) * num_pages + in alloc_rbio()
988 sizeof(*rbio->bio_pages) * num_pages + in alloc_rbio()
989 sizeof(*rbio->finish_pointers) * real_stripes + in alloc_rbio()
990 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) + in alloc_rbio()
991 sizeof(*rbio->finish_pbitmap) * in alloc_rbio()
994 if (!rbio) in alloc_rbio()
997 bio_list_init(&rbio->bio_list); in alloc_rbio()
998 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
999 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
1000 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
1001 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
1002 rbio->bbio = bbio; in alloc_rbio()
1003 rbio->fs_info = fs_info; in alloc_rbio()
1004 rbio->stripe_len = stripe_len; in alloc_rbio()
1005 rbio->nr_pages = num_pages; in alloc_rbio()
1006 rbio->real_stripes = real_stripes; in alloc_rbio()
1007 rbio->stripe_npages = stripe_npages; in alloc_rbio()
1008 rbio->faila = -1; in alloc_rbio()
1009 rbio->failb = -1; in alloc_rbio()
1010 refcount_set(&rbio->refs, 1); in alloc_rbio()
1011 atomic_set(&rbio->error, 0); in alloc_rbio()
1012 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1018 p = rbio + 1; in alloc_rbio()
1023 CONSUME_ALLOC(rbio->stripe_pages, num_pages); in alloc_rbio()
1024 CONSUME_ALLOC(rbio->bio_pages, num_pages); in alloc_rbio()
1025 CONSUME_ALLOC(rbio->finish_pointers, real_stripes); in alloc_rbio()
1026 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1027 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1037 rbio->nr_data = nr_data; in alloc_rbio()
1038 return rbio; in alloc_rbio()
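
The alloc_rbio() lines above use the CONSUME_ALLOC pattern: one zeroed allocation holds the struct plus all of its variable-size arrays, and the pointers are carved out of the tail starting at rbio + 1. A userspace sketch of the same layout trick (struct raid_ctx and alloc_ctx are made-up names for illustration only):

#include <stdio.h>
#include <stdlib.h>

struct raid_ctx {
	void **stripe_pages;
	void **bio_pages;
	void **finish_pointers;
	/* trailing arrays live directly after the struct */
};

/* One calloc() for the struct and every array; carve the arrays from the tail. */
static struct raid_ctx *alloc_ctx(int num_pages, int real_stripes)
{
	struct raid_ctx *ctx;
	char *p;

	ctx = calloc(1, sizeof(*ctx) +
			sizeof(void *) * num_pages +	/* stripe_pages */
			sizeof(void *) * num_pages +	/* bio_pages */
			sizeof(void *) * real_stripes);	/* finish_pointers */
	if (!ctx)
		return NULL;

	p = (char *)(ctx + 1);
	ctx->stripe_pages = (void **)p;
	p += sizeof(void *) * num_pages;
	ctx->bio_pages = (void **)p;
	p += sizeof(void *) * num_pages;
	ctx->finish_pointers = (void **)p;
	return ctx;
}

int main(void)
{
	struct raid_ctx *ctx = alloc_ctx(64, 4);

	if (!ctx)
		return 1;
	printf("one allocation, three arrays: %p %p %p\n",
	       (void *)ctx->stripe_pages, (void *)ctx->bio_pages,
	       (void *)ctx->finish_pointers);
	free(ctx);
	return 0;
}
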
1042 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1047 for (i = 0; i < rbio->nr_pages; i++) { in alloc_rbio_pages()
1048 if (rbio->stripe_pages[i]) in alloc_rbio_pages()
1053 rbio->stripe_pages[i] = page; in alloc_rbio_pages()
1059 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1064 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); in alloc_rbio_parity_pages()
1066 for (; i < rbio->nr_pages; i++) { in alloc_rbio_parity_pages()
1067 if (rbio->stripe_pages[i]) in alloc_rbio_parity_pages()
1072 rbio->stripe_pages[i] = page; in alloc_rbio_parity_pages()
1082 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, in rbio_add_io_page() argument
1096 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1101 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_page()
1140 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1142 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1143 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1144 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1146 finish_rmw(rbio); in validate_rbio_for_rmw()
1158 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1165 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1166 bio_list_for_each(bio, &rbio->bio_list) { in index_rbio_pages()
1172 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1179 rbio->bio_pages[page_index + i] = bvec.bv_page; in index_rbio_pages()
1183 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
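
index_rbio_pages() above walks the queued bios and files their pages into bio_pages[] by position: the bio's logical start minus raid_map[0] gives a byte offset into the full stripe, which (shifted by the page size, assumed here) is the slot index. A small sketch of that mapping:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4K pages, as on common btrfs setups */

/*
 * Logical address of a bio, minus the logical start of the full stripe
 * (raid_map[0]), converted to an index into the flat bio_pages[] array.
 */
static int logical_to_page_index(uint64_t logical, uint64_t full_stripe_start)
{
	return (int)((logical - full_stripe_start) >> PAGE_SHIFT);
}

int main(void)
{
	uint64_t full_stripe_start = 1ULL << 30;
	uint64_t bio_start = full_stripe_start + 5 * 4096;

	printf("bio_pages index = %d\n",
	       logical_to_page_index(bio_start, full_stripe_start));
	return 0;
}
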
1194 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1196 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw()
1197 void **pointers = rbio->finish_pointers; in finish_rmw()
1198 int nr_data = rbio->nr_data; in finish_rmw()
1209 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_rmw()
1210 p_stripe = rbio->real_stripes - 1; in finish_rmw()
1211 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_rmw()
1212 p_stripe = rbio->real_stripes - 2; in finish_rmw()
1213 q_stripe = rbio->real_stripes - 1; in finish_rmw()
1226 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1227 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1228 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1230 atomic_set(&rbio->error, 0); in finish_rmw()
1241 index_rbio_pages(rbio); in finish_rmw()
1242 if (!rbio_is_full(rbio)) in finish_rmw()
1243 cache_rbio_pages(rbio); in finish_rmw()
1245 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1247 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1251 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1256 p = rbio_pstripe_page(rbio, pagenr); in finish_rmw()
1266 p = rbio_qstripe_page(rbio, pagenr); in finish_rmw()
1270 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_rmw()
1279 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1280 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1288 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1289 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1291 if (stripe < rbio->nr_data) { in finish_rmw()
1292 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1296 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1299 ret = rbio_add_io_page(rbio, &bio_list, in finish_rmw()
1300 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1309 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1313 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1315 if (stripe < rbio->nr_data) { in finish_rmw()
1316 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1320 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1323 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_rmw()
1324 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1325 pagenr, rbio->stripe_len); in finish_rmw()
1332 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1333 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1340 bio->bi_private = rbio; in finish_rmw()
1349 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_rmw()
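
finish_rmw() above generates parity before issuing the writes: for RAID5 there is only a P stripe, for RAID6 the listed raid6_call.gen_syndrome() call additionally produces Q. A self-contained sketch of the RAID5 half, where P is simply the byte-wise XOR of all data stripes (the tiny 8-byte "page" is only to keep the example readable):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRIPE_BYTES 8

/* P parity = XOR of every data stripe, byte by byte. */
static void gen_p_parity(uint8_t *p, uint8_t **data, int nr_data, size_t len)
{
	size_t i;
	int d;

	memcpy(p, data[0], len);
	for (d = 1; d < nr_data; d++)
		for (i = 0; i < len; i++)
			p[i] ^= data[d][i];
}

int main(void)
{
	uint8_t d0[STRIPE_BYTES] = "AAAAAAA";
	uint8_t d1[STRIPE_BYTES] = "BBBBBBB";
	uint8_t d2[STRIPE_BYTES] = "CCCCCCC";
	uint8_t *data[] = { d0, d1, d2 };
	uint8_t p[STRIPE_BYTES];

	gen_p_parity(p, data, 3, STRIPE_BYTES);
	printf("P[0] = 0x%02x (0x41 ^ 0x42 ^ 0x43)\n", p[0]);
	return 0;
}
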
1360 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1370 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1371 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1374 physical < stripe_start + rbio->stripe_len && in find_bio_stripe()
1389 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1398 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1399 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1401 logical < stripe_start + rbio->stripe_len) { in find_logical_bio_stripe()
1411 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1416 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1419 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1422 if (rbio->faila == -1) { in fail_rbio_index()
1424 rbio->faila = failed; in fail_rbio_index()
1425 atomic_inc(&rbio->error); in fail_rbio_index()
1426 } else if (rbio->failb == -1) { in fail_rbio_index()
1428 rbio->failb = failed; in fail_rbio_index()
1429 atomic_inc(&rbio->error); in fail_rbio_index()
1434 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1443 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1446 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1451 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
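
fail_rbio_index() above records at most two failed stripe indexes (faila/failb) and bumps the error counter; a stripe that already failed is not counted twice, and a third distinct failure is more than RAID6 parity can repair (the kernel reports it as an I/O error). A small sketch of that bookkeeping:

#include <stdio.h>

struct fail_state {
	int faila, failb;	/* -1 = unused slot */
	int errors;
};

/* Like fail_rbio_index(): remember up to two distinct failed stripes. */
static int record_failure(struct fail_state *fs, int failed)
{
	if (fs->faila == failed || fs->failb == failed)
		return 0;			/* this stripe is already counted */
	if (fs->faila == -1)
		fs->faila = failed;
	else if (fs->failb == -1)
		fs->failb = failed;
	else
		return -1;			/* more failures than parity can cover */
	fs->errors++;
	return 0;
}

int main(void)
{
	struct fail_state fs = { -1, -1, 0 };

	record_failure(&fs, 2);
	record_failure(&fs, 0);
	printf("faila=%d failb=%d errors=%d, third failure -> %d\n",
	       fs.faila, fs.failb, fs.errors, record_failure(&fs, 1));
	return 0;
}
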
1479 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_rmw_end_io() local
1482 fail_bio_stripe(rbio, bio); in raid_rmw_end_io()
1488 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_rmw_end_io()
1491 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1499 validate_rbio_for_rmw(rbio); in raid_rmw_end_io()
1504 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_rmw_end_io()
1511 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1522 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1526 index_rbio_pages(rbio); in raid56_rmw_stripe()
1528 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1533 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1534 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in raid56_rmw_stripe()
1542 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1546 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1554 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_rmw_stripe()
1555 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1576 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1582 bio->bi_private = rbio; in raid56_rmw_stripe()
1586 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_rmw_stripe()
1594 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_rmw_stripe()
1602 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1610 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1614 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1616 __free_raid_bio(rbio); in full_stripe_write()
1620 ret = lock_stripe_add(rbio); in full_stripe_write()
1622 finish_rmw(rbio); in full_stripe_write()
1631 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1635 ret = lock_stripe_add(rbio); in partial_stripe_write()
1637 start_async_work(rbio, rmw_work); in partial_stripe_write()
1647 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1650 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1651 return partial_stripe_write(rbio); in __raid56_parity_write()
1652 return full_stripe_write(rbio); in __raid56_parity_write()
1761 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1766 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_write()
1767 if (IS_ERR(rbio)) { in raid56_parity_write()
1769 return PTR_ERR(rbio); in raid56_parity_write()
1771 bio_list_add(&rbio->bio_list, bio); in raid56_parity_write()
1772 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_write()
1773 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1776 rbio->generic_bio_cnt = 1; in raid56_parity_write()
1782 if (rbio_is_full(rbio)) { in raid56_parity_write()
1783 ret = full_stripe_write(rbio); in raid56_parity_write()
1796 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1799 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1811 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1820 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1826 faila = rbio->faila; in __raid_recover_end_io()
1827 failb = rbio->failb; in __raid_recover_end_io()
1829 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1830 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1831 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1832 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1833 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1836 index_rbio_pages(rbio); in __raid_recover_end_io()
1838 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid_recover_end_io()
1843 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1844 !test_bit(pagenr, rbio->dbitmap)) in __raid_recover_end_io()
1850 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1855 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1856 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1858 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1860 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1866 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1872 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1901 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1902 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1914 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1915 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1918 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1929 copy_page(pointers[faila], pointers[rbio->nr_data]); in __raid_recover_end_io()
1933 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1935 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
1938 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); in __raid_recover_end_io()
1946 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
1947 for (i = 0; i < rbio->stripe_npages; i++) { in __raid_recover_end_io()
1949 page = rbio_stripe_page(rbio, faila, i); in __raid_recover_end_io()
1953 page = rbio_stripe_page(rbio, failb, i); in __raid_recover_end_io()
1958 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1963 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1964 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1966 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1968 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1984 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1985 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
2001 if (err == BLK_STS_OK && rbio->failb < 0) in __raid_recover_end_io()
2002 cache_rbio_pages(rbio); in __raid_recover_end_io()
2004 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
2006 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2008 rbio->faila = -1; in __raid_recover_end_io()
2009 rbio->failb = -1; in __raid_recover_end_io()
2011 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
2012 finish_rmw(rbio); in __raid_recover_end_io()
2013 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
2014 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
2018 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
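
The __raid_recover_end_io() lines above rebuild the failed stripe(s): RAID6 double failures go through raid6_2data_recov()/raid6_datap_recov(), while a single failed data stripe is recovered by XOR (copy the parity, then XOR in every surviving data stripe, as the copy_page/run_xor lines suggest). A self-contained sketch of the single-failure XOR path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRIPE_BYTES 8

/*
 * Rebuild one missing stripe: XOR together the parity stripe and every
 * surviving data stripe.  Whatever drops out of the XOR is the lost data.
 */
static void rebuild_one_stripe(uint8_t *out, uint8_t **stripes, int nr_stripes,
			       int failed, size_t len)
{
	size_t i;
	int s;

	memset(out, 0, len);
	for (s = 0; s < nr_stripes; s++) {
		if (s == failed)
			continue;
		for (i = 0; i < len; i++)
			out[i] ^= stripes[s][i];
	}
}

int main(void)
{
	uint8_t d0[STRIPE_BYTES] = "hello!!";
	uint8_t d1[STRIPE_BYTES] = "world!!";
	uint8_t p[STRIPE_BYTES], rebuilt[STRIPE_BYTES];
	uint8_t *stripes[] = { d0, d1, p };
	size_t i;

	for (i = 0; i < STRIPE_BYTES; i++)	/* P = d0 ^ d1 */
		p[i] = d0[i] ^ d1[i];

	rebuild_one_stripe(rebuilt, stripes, 3, 1, STRIPE_BYTES);	/* lose d1 */
	printf("recovered d1: %.7s\n", (char *)rebuilt);
	return 0;
}
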
2028 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_recover_end_io() local
2035 fail_bio_stripe(rbio, bio); in raid_recover_end_io()
2040 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_recover_end_io()
2043 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2044 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_recover_end_io()
2046 __raid_recover_end_io(rbio); in raid_recover_end_io()
2057 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2068 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2072 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2079 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2080 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2081 atomic_inc(&rbio->error); in __raid56_parity_recover()
2085 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid56_parity_recover()
2092 p = rbio_stripe_page(rbio, stripe, pagenr); in __raid56_parity_recover()
2096 ret = rbio_add_io_page(rbio, &bio_list, in __raid56_parity_recover()
2097 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2098 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2111 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2112 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2123 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2129 bio->bi_private = rbio; in __raid56_parity_recover()
2133 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in __raid56_parity_recover()
2141 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2142 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2143 rbio_orig_end_io(rbio, BLK_STS_IOERR); in __raid56_parity_recover()
2161 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2169 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_recover()
2170 if (IS_ERR(rbio)) { in raid56_parity_recover()
2173 return PTR_ERR(rbio); in raid56_parity_recover()
2176 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2177 bio_list_add(&rbio->bio_list, bio); in raid56_parity_recover()
2178 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_recover()
2180 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2181 if (rbio->faila == -1) { in raid56_parity_recover()
2188 kfree(rbio); in raid56_parity_recover()
2194 rbio->generic_bio_cnt = 1; in raid56_parity_recover()
2210 rbio->failb = rbio->real_stripes - (mirror_num - 1); in raid56_parity_recover()
2211 ASSERT(rbio->failb > 0); in raid56_parity_recover()
2212 if (rbio->failb <= rbio->faila) in raid56_parity_recover()
2213 rbio->failb--; in raid56_parity_recover()
2216 ret = lock_stripe_add(rbio); in raid56_parity_recover()
2226 __raid56_parity_recover(rbio); in raid56_parity_recover()
2238 struct btrfs_raid_bio *rbio; in rmw_work() local
2240 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2241 raid56_rmw_stripe(rbio); in rmw_work()
2246 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2248 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2249 __raid56_parity_recover(rbio); in read_rebuild_work()
2268 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2271 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2272 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2274 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2280 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2287 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2289 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2293 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2297 ASSERT(rbio->stripe_npages == stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2298 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2304 rbio->generic_bio_cnt = 1; in raid56_parity_alloc_scrub_rbio()
2306 return rbio; in raid56_parity_alloc_scrub_rbio()
2310 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, in raid56_add_scrub_pages() argument
2316 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2317 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_add_scrub_pages()
2318 rbio->stripe_len * rbio->nr_data); in raid56_add_scrub_pages()
2319 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2321 rbio->bio_pages[index] = page; in raid56_add_scrub_pages()
2328 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2335 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { in alloc_rbio_essential_pages()
2336 for (i = 0; i < rbio->real_stripes; i++) { in alloc_rbio_essential_pages()
2337 index = i * rbio->stripe_npages + bit; in alloc_rbio_essential_pages()
2338 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2344 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2350 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2353 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub()
2354 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2355 unsigned long *pbitmap = rbio->finish_pbitmap; in finish_parity_scrub()
2356 int nr_data = rbio->nr_data; in finish_parity_scrub()
2370 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_parity_scrub()
2371 p_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2372 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_parity_scrub()
2373 p_stripe = rbio->real_stripes - 2; in finish_parity_scrub()
2374 q_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2379 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2381 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); in finish_parity_scrub()
2389 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2408 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2410 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2415 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2430 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_parity_scrub()
2439 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2441 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) in finish_parity_scrub()
2442 copy_page(parity, pointers[rbio->scrubp]); in finish_parity_scrub()
2445 bitmap_clear(rbio->dbitmap, pagenr, 1); in finish_parity_scrub()
2449 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
2463 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2466 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2467 ret = rbio_add_io_page(rbio, &bio_list, in finish_parity_scrub()
2468 page, rbio->scrubp, pagenr, rbio->stripe_len); in finish_parity_scrub()
2476 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2479 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2480 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_parity_scrub()
2481 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2482 pagenr, rbio->stripe_len); in finish_parity_scrub()
2491 rbio_orig_end_io(rbio, BLK_STS_OK); in finish_parity_scrub()
2495 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2502 bio->bi_private = rbio; in finish_parity_scrub()
2511 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_parity_scrub()
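
finish_parity_scrub() above regenerates parity for the pages marked in dbitmap, memcmp()s the result against the parity currently on disk, and only queues writes for pages that differ (matching pages have their dbitmap bit cleared). A two-data-stripe sketch of that check-then-repair step; scrub_parity_page is an illustrative name, and the 8-byte page is only for readability:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_BYTES 8

/* Recompute parity, compare with what is on disk, rewrite only if it differs. */
static int scrub_parity_page(const uint8_t *d0, const uint8_t *d1,
			     uint8_t *ondisk_parity, size_t len)
{
	uint8_t expected[PAGE_BYTES];
	size_t i;

	for (i = 0; i < len; i++)
		expected[i] = d0[i] ^ d1[i];

	if (memcmp(expected, ondisk_parity, len) == 0)
		return 0;			/* parity is good, nothing to write */

	memcpy(ondisk_parity, expected, len);	/* repair: rewrite the parity page */
	return 1;
}

int main(void)
{
	uint8_t d0[PAGE_BYTES] = "AAAAAAA", d1[PAGE_BYTES] = "BBBBBBB";
	uint8_t parity[PAGE_BYTES];
	size_t i;

	for (i = 0; i < PAGE_BYTES; i++)
		parity[i] = d0[i] ^ d1[i];
	printf("clean stripe rewritten? %d\n",
	       scrub_parity_page(d0, d1, parity, PAGE_BYTES));

	parity[3] ^= 0xff;			/* simulate a corrupted parity byte */
	printf("corrupt stripe rewritten? %d\n",
	       scrub_parity_page(d0, d1, parity, PAGE_BYTES));
	return 0;
}
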
2517 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2519 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2531 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2533 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2536 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2539 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2541 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2542 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2544 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2546 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2547 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2554 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2562 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2572 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2575 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2577 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2582 rbio_orig_end_io(rbio, BLK_STS_IOERR); in validate_rbio_for_parity_scrub()
2595 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_parity_scrub_end_io() local
2598 fail_bio_stripe(rbio, bio); in raid56_parity_scrub_end_io()
2604 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid56_parity_scrub_end_io()
2612 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io()
2615 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2626 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2630 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2635 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2636 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in raid56_parity_scrub_stripe()
2644 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2648 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2656 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_parity_scrub_stripe()
2657 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2678 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2684 bio->bi_private = rbio; in raid56_parity_scrub_stripe()
2688 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_parity_scrub_stripe()
2696 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_parity_scrub_stripe()
2704 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2709 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2711 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2712 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2715 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2717 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2718 start_async_work(rbio, scrub_parity_work); in raid56_parity_submit_scrub_rbio()
2727 struct btrfs_raid_bio *rbio; in raid56_alloc_missing_rbio() local
2729 rbio = alloc_rbio(fs_info, bbio, length); in raid56_alloc_missing_rbio()
2730 if (IS_ERR(rbio)) in raid56_alloc_missing_rbio()
2733 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2734 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2741 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2742 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2744 kfree(rbio); in raid56_alloc_missing_rbio()
2752 rbio->generic_bio_cnt = 1; in raid56_alloc_missing_rbio()
2754 return rbio; in raid56_alloc_missing_rbio()
2757 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) in raid56_submit_missing_rbio() argument
2759 if (!lock_stripe_add(rbio)) in raid56_submit_missing_rbio()
2760 start_async_work(rbio, read_rebuild_work); in raid56_submit_missing_rbio()