Lines Matching refs:rbio (fs/btrfs/raid56.c)

161 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
162 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
165 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
166 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
167 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
168 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
169 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
171 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
175 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) in start_async_work() argument
177 btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); in start_async_work()
178 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); in start_async_work()
236 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
243 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
247 for (i = 0; i < rbio->nr_pages; i++) { in cache_rbio_pages()
248 if (!rbio->bio_pages[i]) in cache_rbio_pages()
251 s = kmap(rbio->bio_pages[i]); in cache_rbio_pages()
252 d = kmap(rbio->stripe_pages[i]); in cache_rbio_pages()
256 kunmap(rbio->bio_pages[i]); in cache_rbio_pages()
257 kunmap(rbio->stripe_pages[i]); in cache_rbio_pages()
258 SetPageUptodate(rbio->stripe_pages[i]); in cache_rbio_pages()
260 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
266 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
268 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
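The bucket helper above hashes the logical start address of the full stripe (raid_map[0]) to pick a slot in the stripe hash table. A minimal sketch of the idea follows; the shift amount and the BTRFS_STRIPE_HASH_TABLE_BITS constant are assumptions for illustration, not quoted from raid56.c:

        /* Sketch only: the low bits of the byte address are mostly zero,
         * so shift them off before hashing to spread full stripes across
         * buckets.  hash_64() is from <linux/hash.h>. */
        static int example_rbio_bucket(u64 raid_map0)
        {
                return hash_64(raid_map0 >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
        }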
329 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
331 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
339 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
342 table = rbio->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
354 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
356 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
357 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
370 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
371 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
372 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
373 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
374 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
379 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
383 __free_raid_bio(rbio); in __remove_rbio_from_cache()
389 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
394 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
397 table = rbio->fs_info->stripe_hash_table; in remove_rbio_from_cache()
400 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
411 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
417 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
420 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
449 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
454 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
457 table = rbio->fs_info->stripe_hash_table; in cache_rbio()
460 spin_lock(&rbio->bio_list_lock); in cache_rbio()
463 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
464 refcount_inc(&rbio->refs); in cache_rbio()
466 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
467 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
469 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
473 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
482 if (found != rbio) in cache_rbio()
513 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
516 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
519 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
520 if (size != rbio->nr_data * rbio->stripe_len) in rbio_is_full()
522 BUG_ON(size > rbio->nr_data * rbio->stripe_len); in rbio_is_full()
523 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
599 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page_index() argument
602 return stripe * rbio->stripe_npages + index; in rbio_stripe_page_index()
609 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page() argument
612 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; in rbio_stripe_page()
618 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_pstripe_page() argument
620 return rbio_stripe_page(rbio, rbio->nr_data, index); in rbio_pstripe_page()
627 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_qstripe_page() argument
629 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_page()
631 return rbio_stripe_page(rbio, rbio->nr_data + 1, index); in rbio_qstripe_page()
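The three helpers above flatten a (stripe, page) pair into the single stripe_pages[] array: data stripes come first, the P stripe sits at index nr_data, and the Q stripe sits at nr_data + 1 when the layout is RAID6 (real_stripes == nr_data + 2). A worked sketch of that arithmetic, with illustrative names only:

        /* Stripe-major layout: page i of stripe s sits at s * stripe_npages + i.
         * Example: stripe_npages = 16, stripe 2, page 5 -> index 37. */
        static int example_stripe_page_index(int stripe, int stripe_npages, int index)
        {
                return stripe * stripe_npages + index;
        }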
656 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
658 int bucket = rbio_bucket(rbio); in lock_stripe_add()
659 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; in lock_stripe_add()
669 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { in lock_stripe_add()
680 steal_rbio(cur, rbio); in lock_stripe_add()
688 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
689 merge_rbio(cur, rbio); in lock_stripe_add()
691 freeit = rbio; in lock_stripe_add()
707 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
708 merge_rbio(pending, rbio); in lock_stripe_add()
710 freeit = rbio; in lock_stripe_add()
720 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
727 refcount_inc(&rbio->refs); in lock_stripe_add()
728 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
742 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
749 bucket = rbio_bucket(rbio); in unlock_stripe()
750 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
752 if (list_empty(&rbio->plug_list)) in unlock_stripe()
753 cache_rbio(rbio); in unlock_stripe()
756 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
758 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
764 if (list_empty(&rbio->plug_list) && in unlock_stripe()
765 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
767 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
768 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
772 list_del_init(&rbio->hash_list); in unlock_stripe()
773 refcount_dec(&rbio->refs); in unlock_stripe()
780 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
782 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
787 list_del_init(&rbio->plug_list); in unlock_stripe()
791 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
797 steal_rbio(rbio, next); in unlock_stripe()
800 steal_rbio(rbio, next); in unlock_stripe()
803 steal_rbio(rbio, next); in unlock_stripe()
811 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
816 remove_rbio_from_cache(rbio); in unlock_stripe()
819 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
823 if (!refcount_dec_and_test(&rbio->refs)) in __free_raid_bio()
826 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
827 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
828 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
830 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
831 if (rbio->stripe_pages[i]) { in __free_raid_bio()
832 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
833 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
837 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
838 kfree(rbio); in __free_raid_bio()
858 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) in rbio_orig_end_io() argument
860 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
863 if (rbio->generic_bio_cnt) in rbio_orig_end_io()
864 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); in rbio_orig_end_io()
874 unlock_stripe(rbio); in rbio_orig_end_io()
875 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
876 __free_raid_bio(rbio); in rbio_orig_end_io()
889 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
894 fail_bio_stripe(rbio, bio); in raid_write_end_io()
898 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
904 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? in raid_write_end_io()
905 0 : rbio->bbio->max_errors; in raid_write_end_io()
906 if (atomic_read(&rbio->error) > max_errors) in raid_write_end_io()
909 rbio_orig_end_io(rbio, err); in raid_write_end_io()
928 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, in page_in_rbio() argument
934 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; in page_in_rbio()
936 spin_lock_irq(&rbio->bio_list_lock); in page_in_rbio()
937 p = rbio->bio_pages[chunk_page]; in page_in_rbio()
938 spin_unlock_irq(&rbio->bio_list_lock); in page_in_rbio()
943 return rbio->stripe_pages[chunk_page]; in page_in_rbio()
963 struct btrfs_raid_bio *rbio; in alloc_rbio() local
970 rbio = kzalloc(sizeof(*rbio) + in alloc_rbio()
971 sizeof(*rbio->stripe_pages) * num_pages + in alloc_rbio()
972 sizeof(*rbio->bio_pages) * num_pages + in alloc_rbio()
973 sizeof(*rbio->finish_pointers) * real_stripes + in alloc_rbio()
974 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) + in alloc_rbio()
975 sizeof(*rbio->finish_pbitmap) * in alloc_rbio()
978 if (!rbio) in alloc_rbio()
981 bio_list_init(&rbio->bio_list); in alloc_rbio()
982 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
983 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
984 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
985 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
986 rbio->bbio = bbio; in alloc_rbio()
987 rbio->fs_info = fs_info; in alloc_rbio()
988 rbio->stripe_len = stripe_len; in alloc_rbio()
989 rbio->nr_pages = num_pages; in alloc_rbio()
990 rbio->real_stripes = real_stripes; in alloc_rbio()
991 rbio->stripe_npages = stripe_npages; in alloc_rbio()
992 rbio->faila = -1; in alloc_rbio()
993 rbio->failb = -1; in alloc_rbio()
994 refcount_set(&rbio->refs, 1); in alloc_rbio()
995 atomic_set(&rbio->error, 0); in alloc_rbio()
996 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1002 p = rbio + 1; in alloc_rbio()
1007 CONSUME_ALLOC(rbio->stripe_pages, num_pages); in alloc_rbio()
1008 CONSUME_ALLOC(rbio->bio_pages, num_pages); in alloc_rbio()
1009 CONSUME_ALLOC(rbio->finish_pointers, real_stripes); in alloc_rbio()
1010 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1011 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1021 rbio->nr_data = nr_data; in alloc_rbio()
1022 return rbio; in alloc_rbio()
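alloc_rbio sizes a single kzalloc() to cover the struct plus every per-stripe array, then carves the arrays out of the memory that follows the struct (the cursor p starts at rbio + 1, line 1002 above). A sketch of what the CONSUME_ALLOC helper is assumed to do; the real macro in raid56.c may differ in detail:

        /* Assumed carve-out pattern: hand 'ptr' the next 'count' elements
         * of the single allocation and advance the cursor 'p' past them. */
        #define CONSUME_ALLOC(ptr, count)       do {                            \
                        ptr = p;                                                \
                        p = (unsigned char *)p + sizeof(*(ptr)) * (count);      \
                } while (0)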
1026 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1031 for (i = 0; i < rbio->nr_pages; i++) { in alloc_rbio_pages()
1032 if (rbio->stripe_pages[i]) in alloc_rbio_pages()
1037 rbio->stripe_pages[i] = page; in alloc_rbio_pages()
1043 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1048 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); in alloc_rbio_parity_pages()
1050 for (; i < rbio->nr_pages; i++) { in alloc_rbio_parity_pages()
1051 if (rbio->stripe_pages[i]) in alloc_rbio_parity_pages()
1056 rbio->stripe_pages[i] = page; in alloc_rbio_parity_pages()
1066 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, in rbio_add_io_page() argument
1080 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1085 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_page()
1124 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1126 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1127 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1128 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1130 finish_rmw(rbio); in validate_rbio_for_rmw()
1142 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1149 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1150 bio_list_for_each(bio, &rbio->bio_list) { in index_rbio_pages()
1156 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1163 rbio->bio_pages[page_index + i] = bvec.bv_page; in index_rbio_pages()
1167 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1180 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw()
1181 void **pointers = rbio->finish_pointers; in finish_rmw()
1182 int nr_data = rbio->nr_data; in finish_rmw()
1193 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_rmw()
1194 p_stripe = rbio->real_stripes - 1; in finish_rmw()
1195 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_rmw()
1196 p_stripe = rbio->real_stripes - 2; in finish_rmw()
1197 q_stripe = rbio->real_stripes - 1; in finish_rmw()
1210 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1211 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1212 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1214 atomic_set(&rbio->error, 0); in finish_rmw()
1225 index_rbio_pages(rbio); in finish_rmw()
1226 if (!rbio_is_full(rbio)) in finish_rmw()
1227 cache_rbio_pages(rbio); in finish_rmw()
1229 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1231 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1235 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1240 p = rbio_pstripe_page(rbio, pagenr); in finish_rmw()
1250 p = rbio_qstripe_page(rbio, pagenr); in finish_rmw()
1254 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_rmw()
1263 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1264 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1272 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1273 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1275 if (stripe < rbio->nr_data) { in finish_rmw()
1276 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1280 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1283 ret = rbio_add_io_page(rbio, &bio_list, in finish_rmw()
1284 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1293 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1297 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1299 if (stripe < rbio->nr_data) { in finish_rmw()
1300 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1304 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1307 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_rmw()
1308 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1309 pagenr, rbio->stripe_len); in finish_rmw()
1316 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1317 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1324 bio->bi_private = rbio; in finish_rmw()
1333 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_rmw()
1344 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1354 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1355 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1358 physical < stripe_start + rbio->stripe_len && in find_bio_stripe()
1373 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1382 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1383 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1385 logical < stripe_start + rbio->stripe_len) { in find_logical_bio_stripe()
1395 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1400 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1403 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1406 if (rbio->faila == -1) { in fail_rbio_index()
1408 rbio->faila = failed; in fail_rbio_index()
1409 atomic_inc(&rbio->error); in fail_rbio_index()
1410 } else if (rbio->failb == -1) { in fail_rbio_index()
1412 rbio->failb = failed; in fail_rbio_index()
1413 atomic_inc(&rbio->error); in fail_rbio_index()
1418 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1427 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1430 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1435 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
1463 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_rmw_end_io() local
1466 fail_bio_stripe(rbio, bio); in raid_rmw_end_io()
1472 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_rmw_end_io()
1475 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1483 validate_rbio_for_rmw(rbio); in raid_rmw_end_io()
1488 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_rmw_end_io()
1495 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1506 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1510 index_rbio_pages(rbio); in raid56_rmw_stripe()
1512 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1517 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1518 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in raid56_rmw_stripe()
1526 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1530 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1538 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_rmw_stripe()
1539 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1560 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1566 bio->bi_private = rbio; in raid56_rmw_stripe()
1570 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_rmw_stripe()
1578 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_rmw_stripe()
1586 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1594 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1598 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1600 __free_raid_bio(rbio); in full_stripe_write()
1604 ret = lock_stripe_add(rbio); in full_stripe_write()
1606 finish_rmw(rbio); in full_stripe_write()
1615 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1619 ret = lock_stripe_add(rbio); in partial_stripe_write()
1621 start_async_work(rbio, rmw_work); in partial_stripe_write()
1631 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1634 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1635 return partial_stripe_write(rbio); in __raid56_parity_write()
1636 return full_stripe_write(rbio); in __raid56_parity_write()
1745 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1750 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_write()
1751 if (IS_ERR(rbio)) { in raid56_parity_write()
1753 return PTR_ERR(rbio); in raid56_parity_write()
1755 bio_list_add(&rbio->bio_list, bio); in raid56_parity_write()
1756 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_write()
1757 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1760 rbio->generic_bio_cnt = 1; in raid56_parity_write()
1766 if (rbio_is_full(rbio)) { in raid56_parity_write()
1767 ret = full_stripe_write(rbio); in raid56_parity_write()
1780 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1783 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1795 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1804 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1810 faila = rbio->faila; in __raid_recover_end_io()
1811 failb = rbio->failb; in __raid_recover_end_io()
1813 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1814 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1815 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1816 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1817 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1820 index_rbio_pages(rbio); in __raid_recover_end_io()
1822 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid_recover_end_io()
1827 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1828 !test_bit(pagenr, rbio->dbitmap)) in __raid_recover_end_io()
1834 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1839 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1840 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1842 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1844 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1850 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1856 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1885 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1886 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1898 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1899 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1902 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1913 copy_page(pointers[faila], pointers[rbio->nr_data]); in __raid_recover_end_io()
1917 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1919 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
1922 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); in __raid_recover_end_io()
1930 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
1931 for (i = 0; i < rbio->stripe_npages; i++) { in __raid_recover_end_io()
1933 page = rbio_stripe_page(rbio, faila, i); in __raid_recover_end_io()
1937 page = rbio_stripe_page(rbio, failb, i); in __raid_recover_end_io()
1942 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1947 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1948 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1950 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1952 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1968 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1969 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1985 if (err == BLK_STS_OK && rbio->failb < 0) in __raid_recover_end_io()
1986 cache_rbio_pages(rbio); in __raid_recover_end_io()
1988 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
1990 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
1992 rbio->faila = -1; in __raid_recover_end_io()
1993 rbio->failb = -1; in __raid_recover_end_io()
1995 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
1996 finish_rmw(rbio); in __raid_recover_end_io()
1997 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
1998 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
2002 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2012 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_recover_end_io() local
2019 fail_bio_stripe(rbio, bio); in raid_recover_end_io()
2024 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_recover_end_io()
2027 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2028 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_recover_end_io()
2030 __raid_recover_end_io(rbio); in raid_recover_end_io()
2041 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2052 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2056 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2063 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2064 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2065 atomic_inc(&rbio->error); in __raid56_parity_recover()
2069 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid56_parity_recover()
2076 p = rbio_stripe_page(rbio, stripe, pagenr); in __raid56_parity_recover()
2080 ret = rbio_add_io_page(rbio, &bio_list, in __raid56_parity_recover()
2081 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2082 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2095 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2096 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2107 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2113 bio->bi_private = rbio; in __raid56_parity_recover()
2117 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in __raid56_parity_recover()
2125 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2126 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2127 rbio_orig_end_io(rbio, BLK_STS_IOERR); in __raid56_parity_recover()
2145 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2153 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_recover()
2154 if (IS_ERR(rbio)) { in raid56_parity_recover()
2157 return PTR_ERR(rbio); in raid56_parity_recover()
2160 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2161 bio_list_add(&rbio->bio_list, bio); in raid56_parity_recover()
2162 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_recover()
2164 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2165 if (rbio->faila == -1) { in raid56_parity_recover()
2172 kfree(rbio); in raid56_parity_recover()
2178 rbio->generic_bio_cnt = 1; in raid56_parity_recover()
2194 rbio->failb = rbio->real_stripes - (mirror_num - 1); in raid56_parity_recover()
2195 ASSERT(rbio->failb > 0); in raid56_parity_recover()
2196 if (rbio->failb <= rbio->faila) in raid56_parity_recover()
2197 rbio->failb--; in raid56_parity_recover()
2200 ret = lock_stripe_add(rbio); in raid56_parity_recover()
2210 __raid56_parity_recover(rbio); in raid56_parity_recover()
2222 struct btrfs_raid_bio *rbio; in rmw_work() local
2224 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2225 raid56_rmw_stripe(rbio); in rmw_work()
2230 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2232 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2233 __raid56_parity_recover(rbio); in read_rebuild_work()
2252 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2255 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2256 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2258 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2264 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2271 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2273 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2277 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2281 ASSERT(rbio->stripe_npages == stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2282 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2288 rbio->generic_bio_cnt = 1; in raid56_parity_alloc_scrub_rbio()
2290 return rbio; in raid56_parity_alloc_scrub_rbio()
2294 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, in raid56_add_scrub_pages() argument
2300 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2301 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_add_scrub_pages()
2302 rbio->stripe_len * rbio->nr_data); in raid56_add_scrub_pages()
2303 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2305 rbio->bio_pages[index] = page; in raid56_add_scrub_pages()
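raid56_add_scrub_pages files a caller-supplied page by logical address: the byte offset from the start of the full stripe (raid_map[0]) is converted into a slot in the flat bio_pages[] array. The line that computes the slot does not itself reference rbio and so is not in this listing; the assumed conversion is:

        index = stripe_offset >> PAGE_SHIFT;    /* assumed: one slot per PAGE_SIZE */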
2312 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2319 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { in alloc_rbio_essential_pages()
2320 for (i = 0; i < rbio->real_stripes; i++) { in alloc_rbio_essential_pages()
2321 index = i * rbio->stripe_npages + bit; in alloc_rbio_essential_pages()
2322 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2328 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2334 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2337 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub()
2338 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2339 unsigned long *pbitmap = rbio->finish_pbitmap; in finish_parity_scrub()
2340 int nr_data = rbio->nr_data; in finish_parity_scrub()
2354 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_parity_scrub()
2355 p_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2356 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_parity_scrub()
2357 p_stripe = rbio->real_stripes - 2; in finish_parity_scrub()
2358 q_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2363 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2365 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); in finish_parity_scrub()
2373 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2392 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2394 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2399 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2414 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_parity_scrub()
2423 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2425 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) in finish_parity_scrub()
2426 copy_page(parity, pointers[rbio->scrubp]); in finish_parity_scrub()
2429 bitmap_clear(rbio->dbitmap, pagenr, 1); in finish_parity_scrub()
2432 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_parity_scrub()
2433 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
2446 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2449 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2450 ret = rbio_add_io_page(rbio, &bio_list, in finish_parity_scrub()
2451 page, rbio->scrubp, pagenr, rbio->stripe_len); in finish_parity_scrub()
2459 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2462 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2463 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_parity_scrub()
2464 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2465 pagenr, rbio->stripe_len); in finish_parity_scrub()
2474 rbio_orig_end_io(rbio, BLK_STS_OK); in finish_parity_scrub()
2478 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2485 bio->bi_private = rbio; in finish_parity_scrub()
2494 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_parity_scrub()
2500 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2502 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2514 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2516 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2519 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2522 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2524 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2525 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2527 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2529 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2530 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2537 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2545 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2555 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2558 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2560 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2565 rbio_orig_end_io(rbio, BLK_STS_IOERR); in validate_rbio_for_parity_scrub()
2578 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_parity_scrub_end_io() local
2581 fail_bio_stripe(rbio, bio); in raid56_parity_scrub_end_io()
2587 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid56_parity_scrub_end_io()
2595 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io()
2598 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2609 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2613 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2618 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2619 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in raid56_parity_scrub_stripe()
2627 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2631 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2639 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_parity_scrub_stripe()
2640 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2661 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2667 bio->bi_private = rbio; in raid56_parity_scrub_stripe()
2671 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_parity_scrub_stripe()
2679 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_parity_scrub_stripe()
2687 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2692 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2694 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2695 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2698 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2700 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2701 start_async_work(rbio, scrub_parity_work); in raid56_parity_submit_scrub_rbio()
2710 struct btrfs_raid_bio *rbio; in raid56_alloc_missing_rbio() local
2712 rbio = alloc_rbio(fs_info, bbio, length); in raid56_alloc_missing_rbio()
2713 if (IS_ERR(rbio)) in raid56_alloc_missing_rbio()
2716 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2717 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2724 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2725 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2727 kfree(rbio); in raid56_alloc_missing_rbio()
2735 rbio->generic_bio_cnt = 1; in raid56_alloc_missing_rbio()
2737 return rbio; in raid56_alloc_missing_rbio()
2740 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) in raid56_submit_missing_rbio() argument
2742 if (!lock_stripe_add(rbio)) in raid56_submit_missing_rbio()
2743 start_async_work(rbio, read_rebuild_work); in raid56_submit_missing_rbio()